double precision function cwig3j (j1,j2,j3,m1,m2,ient)
c wigner 3j coefficient for integers (ient=1)
c or half-integers (ient=2)
c other arguments should be multiplied by ient
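c usage example (values are illustrative, not taken from the original source):
c     w3j = cwig3j(1,1,2,0,0,1)   evaluates the 3j symbol (1 1 2; 0 0 0)
c     w3j = cwig3j(1,1,2,1,-1,2)  evaluates (1/2 1/2 1; 1/2 -1/2 0), since all
c     angular momentum arguments are doubled when ient=2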
implicit double precision (a-h,o-z)
parameter (idim = 58)
character*512 slog
c dimensions modified for larger arguments by ala 12.12.94
dimension al(idim+1),m(12)
save ini, al
data ini/1/
c idim-1 is the largest argument of factorial to calculate
m3=-m1-m2
if (ini.eq.0) goto 21
c initialisation of the logs of the factorials
ini=0
al(1)=0.0d00
do i=1,idim
b=i
al(i+1)=al(i)+ log(b)
enddo
21 continue
cwig3j=0.0d00
if (((ient-1)*(ient-2)).ne.0) go to 101
ii=ient+ient
c test triangular inequalities, parity and maximum values of m
if (( abs(m1)+ abs(m2)).eq.0.and.mod(j1+j2+j3,ii).ne.0) go to 99
m(1)=j1+j2-j3
m(2)=j2+j3-j1
m(3)=j3+j1-j2
m(4)=j1+m1
m(5)=j1-m1
m(6)=j2+m2
m(7)=j2-m2
m(8)=j3+m3
m(9)=j3-m3
m(10)=j1+j2+j3+ient
m(11)=j2-j3-m1
m(12)=j1-j3+m2
do i=1,12
if (i.gt.10) go to 31
if (m(i).lt.0) go to 99
31 if (mod(m(i),ient).ne.0) go to 101
m(i)=m(i)/ient
if (m(i).gt.idim) go to 101
enddo
c calculate 3j coefficient
max0= max(m(11),m(12),0)+1
min0= min(m(1),m(5),m(6))+1
isig=1
if (mod(max0-1,2).ne.0) isig=-isig
c=-al(m(10)+1)
do i=1,9
c=c+al(m(i)+1)
enddo
c=c/2.0d00
do i=max0,min0
j=2-i
b=al(i)+al(j+m(1))+al(j+m(5))+
$ al(j+m(6))+al(i-m(11))+al(i-m(12))
cwig3j=cwig3j+isig* exp(c-b)
isig=-isig
enddo
if (mod(j1-j2-m3,ii).ne.0) cwig3j=-cwig3j
99 return
101 write(slog,'(a,6i5)') 'error in cwig3j ',j1,j2,j3,m1,m2,ient
call wlog(slog)
stop
end
[file: src/MATH/cwig3j.f | lang: FORTRAN | repo: xraypy/feff85exafs]
import os
import numpy as np
import open3d as o3d
import glob
flip_transform = [[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]]
def robust_kernel_fn(alpha, c):
if alpha is None:
# print('Binary truncation loss')
return lambda x: (np.abs(x) < c).astype(float)
if alpha == 2:
# print('L2 loss')
return lambda x: np.ones_like(x) / c**2
elif alpha == 0:
# print('Cauchy loss')
return lambda x: 2 / (x**2 + 2 * c**2)
elif alpha < -1e5:
# print('Welsch loss')
return lambda x: 1 / c**2 * np.exp(-0.5 * (x / c)**2)
else:
#if alpha == -2:
# print('Geman-McClure loss')
#elif alpha == 1:
# print('Charbonnier / Pseudo-Huber loss')
#else:
# print('General loss with alpha = ', alpha)
return lambda x: 1 / c**2 * np.float_power(
(x / c)**2 / np.abs(alpha - 2) + 1, alpha / 2 - 1)
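# Example usage (sketch; names and the scale value are illustrative): the returned
# callable maps residuals to per-point robust weights, e.g. for weighted ICP.
#   kernel = robust_kernel_fn(alpha=0, c=0.05)   # Cauchy weights with scale 0.05
#   weights = kernel(residuals)                  # residuals: np.ndarray of errors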
def lineset_from_pose_graph(pose_graph, show_loops=True, edge_density=0.1, l = 0.1):
POINTS_PER_FRUSTUM = 5
EDGES_PER_FRUSTUM = 8
points = []
colors = []
lines = []
cnt = 0
for i, node in enumerate(pose_graph.nodes):
pose = np.array(node.pose)
#l = 0.1
points.append((pose @ np.array([0, 0, 0, 1]).T)[:3])
points.append((pose @ np.array([l, l, 2 * l, 1]).T)[:3])
points.append((pose @ np.array([l, -l, 2 * l, 1]).T)[:3])
points.append((pose @ np.array([-l, -l, 2 * l, 1]).T)[:3])
points.append((pose @ np.array([-l, l, 2 * l, 1]).T)[:3])
lines.append([cnt + 0, cnt + 1])
lines.append([cnt + 0, cnt + 2])
lines.append([cnt + 0, cnt + 3])
lines.append([cnt + 0, cnt + 4])
lines.append([cnt + 1, cnt + 2])
lines.append([cnt + 2, cnt + 3])
lines.append([cnt + 3, cnt + 4])
lines.append([cnt + 4, cnt + 1])
for _ in range(EDGES_PER_FRUSTUM):
colors.append(np.array([0, 0, 1]))
cnt += POINTS_PER_FRUSTUM
print('nodes: {}'.format(len(pose_graph.nodes)))
loops = 0
if show_loops:
for edge in pose_graph.edges:
# randomly subsample the displayed loop-closure edges with probability edge_density
switch = np.random.rand() < edge_density
s = edge.source_node_id
t = edge.target_node_id
if edge.uncertain and switch:
lines.append([POINTS_PER_FRUSTUM * s, POINTS_PER_FRUSTUM * t])
colors.append(np.array([0, 1, 0]))
loops += 1
elif not edge.uncertain:
lines.append([POINTS_PER_FRUSTUM * s, POINTS_PER_FRUSTUM * t])
colors.append(np.array([0, 0, 1]))
print('loops: {}'.format(loops))
lineset = o3d.geometry.LineSet()
lineset.points = o3d.utility.Vector3dVector(np.vstack(points))
lineset.lines = o3d.utility.Vector2iVector(np.vstack(lines).astype(int))
lineset.colors = o3d.utility.Vector3dVector(np.vstack(colors))
return lineset
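# Example usage (sketch; the pose-graph path is an assumption):
#   pose_graph = o3d.io.read_pose_graph('fragment_optimized.json')
#   frustums = lineset_from_pose_graph(pose_graph, show_loops=True, edge_density=0.2)
#   o3d.visualization.draw_geometries([frustums])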
def get_normal_map_o3d(data):
pcd = data.to_o3d_pointcloud()
pcd.estimate_normals()
normal_map = np.asarray(pcd.normals).reshape(data.xyz_im.shape)
normal_map = np.squeeze(normal_map)
return normal_map, pcd
def make_point_cloud(points, normals=None, colors=None):
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
if normals is not None:
pcd.normals = o3d.utility.Vector3dVector(normals)
if colors is not None:
pcd.colors = o3d.utility.Vector3dVector(colors)
return pcd
def visualize_icp(pcd_source, pcd_target, T):
import copy
pcd_source = copy.deepcopy(pcd_source)
# pcd_source.paint_uniform_color([1, 0.706, 0])
# target_temp.paint_uniform_color([0, 0.651, 0.929])
pcd_source.paint_uniform_color([87.0 / 255.0, 144.0 / 255.0, 252.0 / 255.0])
pcd_target = copy.deepcopy(pcd_target)
pcd_target.paint_uniform_color([248.0 / 255.0, 156.0 / 255.0, 32.0 / 255.0])
pcd_source.transform(T)
o3d.visualization.draw([
pcd_source, pcd_target
# pcd_source.transform(flip_transform),
# pcd_target.transform(flip_transform)
])
def visualize_correspondences(source_points, target_points, T):
if len(source_points) != len(target_points):
print(
'Error! source points and target points have different lengths {} vs {}'
.format(len(source_points), len(target_points)))
return
pcd_source = make_point_cloud(source_points)
pcd_source.paint_uniform_color([1, 0, 0])
pcd_source.transform(T)
pcd_source.transform(flip_transform)
pcd_target = make_point_cloud(target_points)
pcd_target.paint_uniform_color([0, 1, 0])
pcd_target.transform(flip_transform)
corres = []
for k in range(len(source_points)):
corres.append((k, k))
lineset = o3d.geometry.LineSet.create_from_point_cloud_correspondences(
pcd_source, pcd_target, corres)
o3d.visualization.draw_geometries([pcd_source, pcd_target, lineset])
def load_range_file_names(config):
if not os.path.exists(config.path_dataset):
print(
'Path \'{}\' not found.'.format(config.path_dataset),
'Please provide --path_dataset in the command line or the config file.'
)
return [], []
range_folder = os.path.join(config.path_dataset, config.range_folder)
range_names = glob.glob(os.path.join(range_folder, '*.csv'))
n_range = len(range_names)
if n_range == 0:
print('Range files not found in {}, abort!'.format(range_folder))
return []
return sorted(range_names,
key=lambda x: int(x.split('/')[-1].split('.')[0]))
def load_depth_file_names(config):
if not os.path.exists(config.path_dataset):
print(
'Path \'{}\' not found.'.format(config.path_dataset),
'Please provide --path_dataset in the command line or the config file.'
)
return [], []
depth_folder = os.path.join(config.path_dataset, config.depth_folder)
range_names = glob.glob(os.path.join(depth_folder, '*.png'))
n_range = len(range_names)
if n_range == 0:
print('Range files not found in {}, abort!'.format(depth_folder))
return []
return sorted(range_names,
key=lambda x: int(x.split('/')[-1].split('.')[0]))
def load_fragment_file_names(config):
if not os.path.exists(config.path_dataset):
print(
'Path \'{}\' not found.'.format(config.path_dataset),
'Please provide --path_dataset in the command line or the config file.'
)
return [], []
fragment_folder = os.path.join(config.path_dataset, config.fragment_folder)
fragment_names = glob.glob(os.path.join(fragment_folder, '*.ply'))
n_fragments = len(fragment_names)
if n_fragments == 0:
print('Fragment point clouds not found in {}, abort!'.format(
fragment_folder))
return []
return sorted(fragment_names)
def load_image_file_names(config):
if not os.path.exists(config.path_dataset):
print(
'Path \'{}\' not found.'.format(config.path_dataset),
'Please provide --path_dataset in the command line or the config file.'
)
return [], []
depth_folder = os.path.join(config.path_dataset, config.depth_folder)
color_folder = os.path.join(config.path_dataset, config.color_folder)
# Only 16-bit png depth is supported
depth_file_names = glob.glob(os.path.join(depth_folder, '*.png'))
n_depth = len(depth_file_names)
if n_depth == 0:
print('Depth image not found in {}, abort!'.format(depth_folder))
return [], []
# Try to find a matching number of color images (png or jpg)
extensions = ['*.png', '*.jpg']
for ext in extensions:
color_file_names = glob.glob(os.path.join(color_folder, ext))
if len(color_file_names) == n_depth:
return sorted(depth_file_names), sorted(color_file_names)
print('Found {} depth images in {}, but cannot find matched number of '
'color images in {} with extensions {}, abort!'.format(
n_depth, depth_folder, color_folder, extensions))
return [], []
def load_intrinsic(config):
if config.path_intrinsic is None or config.path_intrinsic == '':
intrinsic = o3d.camera.PinholeCameraIntrinsic(
o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)
else:
intrinsic = o3d.io.read_pinhole_camera_intrinsic(config.path_intrinsic)
if config.engine == 'legacy':
return intrinsic
elif config.engine == 'tensor':
return o3d.core.Tensor(intrinsic.intrinsic_matrix,
o3d.core.Dtype.Float32)
else:
print('Unsupported engine {}'.format(config.engine))
def load_extrinsics(path_trajectory, config):
extrinsics = []
# For either a fragment or a scene
if path_trajectory.endswith('log'):
data = o3d.io.read_pinhole_camera_trajectory(path_trajectory)
for param in data.parameters:
extrinsics.append(param.extrinsic)
# Only for a fragment
elif path_trajectory.endswith('json'):
data = o3d.io.read_pose_graph(path_trajectory)
for node in data.nodes:
extrinsics.append(np.linalg.inv(node.pose))
if config.engine == 'legacy':
return extrinsics
elif config.engine == 'tensor':
return list(
map(lambda x: o3d.core.Tensor(x, o3d.core.Dtype.Float64),
extrinsics))
else:
print('Unsupported engine {}'.format(config.engine))
def save_poses(
path_trajectory,
poses,
intrinsic=o3d.camera.PinholeCameraIntrinsic(
o3d.camera.PinholeCameraIntrinsicParameters.PrimeSenseDefault)):
if path_trajectory.endswith('log'):
traj = o3d.camera.PinholeCameraTrajectory()
params = []
for pose in poses:
param = o3d.camera.PinholeCameraParameters()
param.intrinsic = intrinsic
param.extrinsic = np.linalg.inv(pose)
params.append(param)
traj.parameters = params
o3d.io.write_pinhole_camera_trajectory(path_trajectory, traj)
elif path_trajectory.endswith('json'):
pose_graph = o3d.pipelines.registration.PoseGraph()
for pose in poses:
node = o3d.pipelines.registration.PoseGraphNode()
node.pose = pose
pose_graph.nodes.append(node)
o3d.io.write_pose_graph(path_trajectory, pose_graph)
def init_volume(mode, config):
if config.engine == 'legacy':
return o3d.pipelines.integration.ScalableTSDFVolume(
voxel_length=config.voxel_size,
sdf_trunc=config.sdf_trunc,
color_type=o3d.pipelines.integration.TSDFVolumeColorType.RGB8)
elif config.engine == 'tensor':
if mode == 'scene':
block_count = config.block_count
else:
block_count = config.block_count
return o3d.t.geometry.TSDFVoxelGrid(
{
'tsdf': o3d.core.Dtype.Float32,
'weight': o3d.core.Dtype.UInt16,
'color': o3d.core.Dtype.UInt16
},
voxel_size=config.voxel_size,
sdf_trunc=config.sdf_trunc,
block_resolution=16,
block_count=block_count,
device=o3d.core.Device(config.device))
else:
print('Unsupported engine {}'.format(config.engine))
def extract_pointcloud(volume, config, file_name=None):
if config.engine == 'legacy':
mesh = volume.extract_triangle_mesh()
pcd = o3d.geometry.PointCloud()
pcd.points = mesh.vertices
pcd.colors = mesh.vertex_colors
if file_name is not None:
o3d.io.write_point_cloud(file_name, pcd)
elif config.engine == 'tensor':
pcd = volume.extract_surface_points(
weight_threshold=config.surface_weight_thr)
if file_name is not None:
o3d.io.write_point_cloud(file_name, pcd.to_legacy())
return pcd
def extract_trianglemesh(volume, config, file_name=None):
if config.engine == 'legacy':
mesh = volume.extract_triangle_mesh()
mesh.compute_vertex_normals()
mesh.compute_triangle_normals()
if file_name is not None:
o3d.io.write_triangle_mesh(file_name, mesh)
elif config.engine == 'tensor':
mesh = volume.extract_surface_mesh(
weight_threshold=config.surface_weight_thr)
mesh = mesh.to_legacy()
if file_name is not None:
o3d.io.write_triangle_mesh(file_name, mesh)
return mesh
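# Example usage (sketch): typical tail of a reconstruction loop. The per-frame
# integration call is omitted here since it depends on the chosen engine.
#   volume = init_volume('scene', config)
#   ...  # integrate RGB-D frames into `volume`
#   mesh = extract_trianglemesh(volume, config, file_name='scene_mesh.ply')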
[file: dataloader/common.py | lang: Python | repo: Kwonyoung-Ryu/DeepGlobalRegistration]
% All comment lines start with %
% There are no multi-line comments
% LaTeX is NOT a "What You See Is What You Get" word processing software like
% MS Word, or OpenOffice Writer
% Every LaTeX command starts with a backslash (\)
% LaTeX documents start by defining the type of document to be compiled
% Other document types include book, report, presentations, etc.
% The options for the document appear in the [] brackets. In this case
% it specifies we want to use 12pt font.
\documentclass[12pt]{article}
% Next we define the packages the document uses.
% If you want to include graphics, colored text, or
% source code from another language file into your document,
% you need to enhance the capabilities of LaTeX. This is done by adding packages.
% I'm going to include the float and caption packages for figures
% and hyperref package for hyperlinks
\usepackage{caption}
\usepackage{float}
\usepackage{hyperref}
% We can define some other document properties too!
\author{Chaitanya Krishna Ande, Colton Kohnke, Sricharan Chiruvolu \& \\
Svetlana Golubeva}
\date{\today}
\title{Learn \LaTeX \hspace{1pt} in Y Minutes!}
% Now we're ready to begin the document
% Everything before this line is called "The Preamble"
\begin{document}
% if we set the author, date, title fields, we can have LaTeX
% create a title page for us.
\maketitle
% If we have sections, we can create a table of contents. We have to compile our
% document twice to make it appear in the right order.
% It is a good practice to separate the table of contents from the body of the
% document. To do so we use the \newpage command
\newpage
\tableofcontents
\newpage
% Most research papers have an abstract; you can use the predefined commands for this.
% This should appear in its logical order, therefore, after the top matter,
% but before the main sections of the body.
% This command is available in the document classes article and report.
\begin{abstract}
\LaTeX \hspace{1pt} documentation written as \LaTeX! How novel and totally not
my idea!
\end{abstract}
% Section commands are intuitive.
% All the titles of the sections are added automatically to the table of contents.
\section{Introduction}
Hello, my name is Colton and together we're going to explore \LaTeX!
\section{Another section}
This is the text for another section. I think it needs a subsection.
\subsection{This is a subsection} % Subsections are also intuitive.
I think we need another one
\subsubsection{Pythagoras}
Much better now.
\label{subsec:pythagoras}
% By using the asterisk we can suppress LaTeX's inbuilt numbering.
% This works for other LaTeX commands as well.
\section*{This is an unnumbered section}
However not all sections have to be numbered!
\section{Some Text notes}
%\section{Spacing} % Need to add more information about space intervals
\LaTeX \hspace{1pt} is generally pretty good about placing text where it should
go. If
a line \\ needs \\ to \\ break \\ you add \textbackslash\textbackslash
\hspace{1pt} to the source code. \\
\section{Lists}
Lists are one of the easiest things to create in \LaTeX! I need to go shopping
tomorrow, so let's make a grocery list.
\begin{enumerate} % This creates an "enumerate" environment.
% \item tells the enumerate to increment
\item Salad.
\item 27 watermelon.
\item A single jackrabbit.
% we can even override the item number by using []
\item[how many?] Medium sized squirt guns.
Not a list item, but still part of the enumerate.
\end{enumerate} % All environments must have an end.
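% Unordered lists use the itemize environment in exactly the same way.
For an unordered, bulleted list we simply swap enumerate for itemize:
\begin{itemize}
\item A reusable shopping bag.
\item Coupons for the watermelon.
\end{itemize}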
\section{Math}
One of the primary uses for \LaTeX \hspace{1pt} is to produce academic articles
or technical papers. Usually in the realm of math and science. As such,
we need to be able to add special symbols to our paper! \\
Math has many symbols, far beyond what you can find on a keyboard;
Set and relation symbols, arrows, operators, and Greek letters to name a few.\\
Sets and relations play a vital role in many mathematical research papers.
Here's how you state all x that belong to X, $\forall$ x $\in$ X. \\
% Notice how I needed to add $ signs before and after the symbols. This is
% because when writing, we are in text-mode.
% However, the math symbols only exist in math-mode.
% We can enter math-mode from text mode with the $ signs.
% The opposite also holds true. Variable can also be rendered in math-mode.
% We can also enter math mode with \[\]
\[a^2 + b^2 = c^2 \]
My favorite Greek letter is $\xi$. I also like $\beta$, $\gamma$ and $\sigma$.
I haven't found a Greek letter yet that \LaTeX \hspace{1pt} doesn't know
about! \\
Operators are essential parts of a mathematical document:
trigonometric functions ($\sin$, $\cos$, $\tan$),
logarithms and exponentials ($\log$, $\exp$),
limits ($\lim$), etc.
have pre-defined LaTeX commands.
Let's write an equation to see how it's done:
$\cos(2\theta) = \cos^{2}(\theta) - \sin^{2}(\theta)$ \\
Fractions (Numerator-denominators) can be written in these forms:
% 10 / 7
$$ ^{10}/_{7} $$
% Relatively complex fractions can be written as
% \frac{numerator}{denominator}
$$ \frac{n!}{k!(n - k)!} $$ \\
We can also insert equations in an ``equation environment''.
% Display math with the equation 'environment'
\begin{equation} % enters math-mode
c^2 = a^2 + b^2.
\label{eq:pythagoras} % for referencing
\end{equation} % all \begin statements must have an end statement
We can then reference our new equation!
Eqn.~\ref{eq:pythagoras} is also known as the Pythagoras Theorem which is also
the subject of Sec.~\ref{subsec:pythagoras}. A lot of things can be labeled:
figures, equations, sections, etc.
Summations and Integrals are written with sum and int commands:
% Some LaTeX compilers will complain if there are blank lines
% In an equation environment.
\begin{equation}
\sum_{i=0}^{5} f_{i}
\end{equation}
\begin{equation}
\int_{0}^{\infty} \mathrm{e}^{-x} \mathrm{d}x
\end{equation}
\section{Figures}
Let's insert a Figure. Figure placement can get a little tricky.
I definitely have to look up the placement options each time.
\begin{figure}[H] % H here denotes the placement option.
\centering % centers the figure on the page
% Inserts a figure scaled to 0.8 the width of the page.
%\includegraphics[width=0.8\linewidth]{right-triangle.png}
% Commented out for compilation purposes. Please use your imagination.
\caption{Right triangle with sides $a$, $b$, $c$}
\label{fig:right-triangle}
\end{figure}
\subsection{Table}
We can also insert Tables in the same way as figures.
\begin{table}[H]
\caption{Caption for the Table.}
% the {} arguments below describe how each row of the table is drawn.
% Again, I have to look these up. Each. And. Every. Time.
\begin{tabular}{c|cc}
Number & Last Name & First Name \\ % Columns are separated by &, rows end with \\
\hline % a horizontal line
1 & Biggus & Dickus \\
2 & Monty & Python
\end{tabular}
\end{table}
\section{Getting \LaTeX \hspace{1pt} to not compile something (i.e. Source Code)}
Let's say we want to include some code in our \LaTeX \hspace{1pt} document;
we would then need \LaTeX \hspace{1pt} to not try to interpret that text and
instead just print it to the document. We do this with a verbatim
environment.
% There are other packages that exist (e.g. minted, listings, etc.)
% but verbatim is the bare-bones basic one.
\begin{verbatim}
print("Hello World!")
a%b; % look! We can use % signs in verbatim.
random = 4; #decided by fair random dice roll
\end{verbatim}
\section{Compiling}
By now you're probably wondering how to compile this fabulous document
and look at the glorious glory that is a \LaTeX \hspace{1pt} pdf.
(yes, this document actually does compile). \\
Getting to the final document using \LaTeX \hspace{1pt} consists of the following
steps:
\begin{enumerate}
\item Write the document in plain text (the ``source code'').
\item Compile source code to produce a pdf.
The compilation step looks like this (in Linux): \\
\begin{verbatim}
> pdflatex learn-latex.tex
\end{verbatim}
\end{enumerate}
A number of \LaTeX \hspace{1pt} editors combine both Step 1 and Step 2 in the
same piece of software. So, you get to see Step 1, but not Step 2 completely.
Step 2 is still happening behind the scenes\footnote{In cases, where you use
references (like Eqn.~\ref{eq:pythagoras}), you may need to run Step 2
multiple times, to generate an intermediary *.aux file.}.
% Also, this is how you add footnotes to your document!
You write all your formatting information in plain text in Step 1.
The compilation part in Step 2 takes care of producing the document in the
format you defined in Step 1.
\section{Hyperlinks}
We can also insert hyperlinks in our document. To do so we need to include the
package hyperref into the preamble with the command:
\begin{verbatim}
\usepackage{hyperref}
\end{verbatim}
There exist two main types of links: a visible URL \\
\url{https://learnxinyminutes.com/docs/latex/}, or
\href{https://learnxinyminutes.com/docs/latex/}{shadowed by text}
% You cannot add extra spaces or special symbols into the link text since it
% will cause errors during compilation
This package also produces a list of thumbnails in the output pdf document and
active links in the table of contents.
\section{End}
That's all for now!
% Most often, you would want to have a references section in your document.
% The easiest way to set this up would be by using the bibliography section
\begin{thebibliography}{1}
% similar to other lists, the \bibitem command can be used to list items
% each entry can then be cited directly in the body of the text
\bibitem{latexwiki} The amazing \LaTeX \hspace{1pt} wikibook: {\em
https://en.wikibooks.org/wiki/LaTeX}
\bibitem{latextutorial} An actual tutorial: {\em http://www.latex-tutorial.com}
\end{thebibliography}
% end the document
\end{document}
[file: samples/Misc/learn-latex.tex | lang: TeX | repo: MateoCerquetella/dark-plus-syntax]
# Problem: https://projecteuler.net/problem=227
# Define distance as the clockwise number of people between the dice.
# Use the distance between the dice as the state.
# Use Markov chain to track the probabilities.
# T[distance1][distance2] is the probability of transitioning from distance 1 to distance 2
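# Each turn both players roll independently; for a fair die P(roll 1) = P(roll 6) = 1/6
# and P(roll 2..5) = 4/6, so the joint probabilities used below are products such as
# 1/6 * 4/6 = 4/36 and 1/6 * 1/6 = 1/36.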
import numpy as np
N = 100
def position_to_index(x, y):
return x * N + y
def index_to_position(idx):
return idx // N, idx % N
def new_position(current_position, roll):
if roll == 1:
current_position = (current_position - 1) % N
elif roll == 6:
current_position = (current_position + 1) % N
return current_position
if __name__ == "__main__":
ans = 0.0
T = np.zeros((N, N), dtype = np.double)
S = np.zeros((N), dtype = np.double)
S[50] = 1.0
T[0][0] = 1.0
for delta in range(1, N):
# player 1 rolls 1, player 2 rolls 1: delta doesn't change
# player 1 rolls 1, player 2 rolls [2..5]
T[delta][(delta-1)%N] += 4/36
# player 1 rolls 1, player 2 rolls 6
T[delta][(delta-2)%N] += 1/36
# player 1 rolls [2..5], player 2 rolls 1
T[delta][(delta+1)%N] += 4/36
# player 1 rolls [2..5], player 2 rolls 6
T[delta][(delta-1)%N] += 4/36
# player 1 rolls 6, player 2 rolls 1
T[delta][(delta+2)%N] += 1/36
# player 1 rolls 6, player 2 rolls [2..5]
T[delta][(delta+1)%N] += 4/36
# player 1 rolls 6, player 2 rolls 6: delta doesn't change
T[delta][delta] = 1.0-np.sum(T[delta])
prev_probability = 0.0
curr_probability = 0.0
prev_expectation = -1.0
curr_expectation = 0.0
tol = 1e-12
n_turns = 0
while (((curr_expectation - prev_expectation) > tol) or (curr_probability < tol)):
n_turns = n_turns + 1
prev_probability = curr_probability
prev_expectation = curr_expectation
S = S.dot(T)
curr_probability = S[0]
curr_expectation = curr_expectation + n_turns * (curr_probability - prev_probability)
ans = curr_expectation
print("{:.6f}".format(ans))
[file: 3rd_100/problem227_improved.py | lang: Python | repo: takekoputa/project-euler]
import os
import logging
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
logging.getLogger("tensorflow").setLevel(logging.CRITICAL)
logging.getLogger("tensorflow_hub").setLevel(logging.CRITICAL)
import shutil
import numpy as np
from perform.constants import REAL_TYPE, PARAM_INPUTS, ROM_INPUTS
# Constants to use throughout testing
# sample air chemistry dictionary
CHEM_DICT_AIR = {}
CHEM_DICT_AIR["gas_model"] = "cpg"
CHEM_DICT_AIR["num_species"] = 3
CHEM_DICT_AIR["mol_weights"] = np.array([32.0, 28.0, 40.0], dtype=REAL_TYPE)
CHEM_DICT_AIR["species_names"] = np.array(["oxygen", "nitrogen", "argon"])
CHEM_DICT_AIR["enth_ref"] = np.array([0.0, 0.0, 0.0], dtype=REAL_TYPE)
CHEM_DICT_AIR["cp"] = np.array([918.0, 1040.0, 520.3], dtype=REAL_TYPE)
CHEM_DICT_AIR["pr"] = np.array([0.730, 0.718, 0.687], dtype=REAL_TYPE)
CHEM_DICT_AIR["sc"] = np.array([0.612, 0.612, 0.612], dtype=REAL_TYPE)
CHEM_DICT_AIR["mu_ref"] = np.array([2.07e-5, 1.76e-5, 2.27e-5], dtype=REAL_TYPE)
CHEM_DICT_AIR["temp_ref"] = np.array([0.0, 0.0, 0.0], dtype=REAL_TYPE)
# sample reactant-product chemistry and reaction dictionaries
CHEM_DICT_REACT = {}
CHEM_DICT_REACT["gas_model"] = "cpg"
CHEM_DICT_REACT["num_species"] = 2
CHEM_DICT_REACT["mol_weights"] = np.array([21.32, 21.32], dtype=REAL_TYPE)
CHEM_DICT_REACT["species_names"] = np.array(["Reactant", "Product"])
CHEM_DICT_REACT["enth_ref"] = np.array([-7.4320e6, -10.8e6], dtype=REAL_TYPE)
CHEM_DICT_REACT["cp"] = np.array([1538.22, 1538.22], dtype=REAL_TYPE)
CHEM_DICT_REACT["pr"] = np.array([0.713, 0.713], dtype=REAL_TYPE)
CHEM_DICT_REACT["sc"] = np.array([0.62, 0.62], dtype=REAL_TYPE)
CHEM_DICT_REACT["mu_ref"] = np.array([7.35e-4, 7.35e-4], dtype=REAL_TYPE)
CHEM_DICT_REACT["temp_ref"] = np.array([0.0, 0.0], dtype=REAL_TYPE)
CHEM_DICT_REACT["reaction_model"] = "fr_irrev"
CHEM_DICT_REACT["num_reactions"] = 1
CHEM_DICT_REACT["nu"] = [[1.0, -1.0]]
CHEM_DICT_REACT["nu_arr"] = [[1.0, 0.0]]
CHEM_DICT_REACT["pre_exp_fact"] = [2.12e10]
CHEM_DICT_REACT["temp_exp"] = [0.0]
CHEM_DICT_REACT["act_energy"] = [2.025237e8]
# consistent primitive initial conditions
SOL_PRIM_IN_AIR = np.array(
[
[1e6, 1e5],
[2.0, 1.0],
[300.0, 400.0],
[0.4, 0.6],
[0.6, 0.4],
]
)
SOL_PRIM_IN_REACT = np.array(
[
[1e6, 9e5],
[2.0, 1.0],
[1000.0, 1200.0],
[0.6, 0.4],
]
)
# generate directory which acts as PERFORM working directory
TEST_DIR = "test_dir"
def gen_test_dir():
if os.path.isdir(TEST_DIR):
shutil.rmtree(TEST_DIR)
os.mkdir(TEST_DIR)
# delete working directory on cleanup
def del_test_dir():
if os.path.isdir(TEST_DIR):
shutil.rmtree(TEST_DIR)
# get output mode and directory
def get_output_mode():
output_mode = bool(int(os.environ["PERFORM_TEST_OUTPUT_MODE"]))
output_dir = os.environ["PERFORM_TEST_OUTPUT_DIR"]
return output_mode, output_dir
# sample input files necessary for solution domain initialization
def solution_domain_setup(run_dir=TEST_DIR):
# generate mesh file
mesh_file = os.path.join(run_dir, "mesh.inp")
with open(mesh_file, "w") as f:
f.write("x_left = 0.0\n")
f.write("x_right = 2e-5\n")
f.write("num_cells = 2\n")
# generate chemistry file
chem_file = os.path.join(run_dir, "chem.inp")
with open(chem_file, "w") as f:
for key, item in CHEM_DICT_REACT.items():
if isinstance(item, str):
f.write(key + ' = "' + str(item) + '"\n')
elif isinstance(item, list) or isinstance(item, np.ndarray):
f.write(key + " = [" + ",".join(str(val) for val in item) + "]\n")
else:
f.write(key + " = " + str(item) + "\n")
# generate solver input files
inp_file = os.path.join(run_dir, PARAM_INPUTS)
with open(inp_file, "w") as f:
f.write("stdout = False \n")
f.write('chem_file = "./chem.inp" \n')
f.write('mesh_file = "./mesh.inp" \n')
f.write('init_file = "test_init_file.npy" \n')
f.write("dt = 1e-7 \n")
f.write('time_scheme = "bdf" \n')
f.write("adapt_dtau = True \n")
f.write("time_order = 2 \n")
f.write("num_steps = 10 \n")
f.write("res_tol = 1e-11 \n")
f.write('invisc_flux_scheme = "roe" \n')
f.write('visc_flux_scheme = "standard" \n')
f.write("space_order = 2 \n")
f.write('grad_limiter = "barth_face" \n')
f.write('bound_cond_inlet = "meanflow" \n')
f.write("press_inlet = 1003706.0 \n")
f.write("temp_inlet = 1000.0 \n")
f.write("vel_inlet = 1853.0 \n")
f.write("rho_inlet = 3944.0 \n")
f.write("mass_fracs_inlet = [0.6, 0.4] \n")
f.write('bound_cond_outlet = "meanflow" \n')
f.write("press_outlet = 898477.0 \n")
f.write("vel_outlet = 1522.0 \n")
f.write("rho_outlet = 2958.0 \n")
f.write("mass_fracs_outlet = [0.4, 0.6] \n")
f.write("probe_locs = [-1.0, 5e-6, 1.0] \n")
f.write('probe_vars = ["pressure", "temperature", "species-0", "density"] \n')
f.write("save_restarts = True \n")
f.write("restart_interval = 5 \n")
f.write("out_interval = 2 \n")
f.write("out_itmdt_interval = 5 \n")
f.write("prim_out = True \n")
f.write("cons_out = True \n")
f.write("source_out = True \n")
f.write("hr_out = True \n")
f.write("rhs_out = True \n")
f.write("vis_show = False \n")
f.write("vis_save = True \n")
f.write("vis_interval = 3 \n")
f.write('vis_type_0 = "field" \n')
f.write('vis_var_0 = ["temperature", "density", "pressure", "species-0"] \n')
f.write("vis_y_bounds_0 = [[500, 1500], [1.8, 2.6], [1.2e6, 8e5], [-0.1, 1.1]] \n")
f.write('vis_type_1 = "probe" \n')
f.write('vis_var_1 = ["temperature", "density", "pressure", "species-0"] \n')
f.write("vis_y_bounds_1 = [[500, 1500], [1.8, 2.6], [1.2e6, 8e5], [-0.1, 1.1]] \n")
f.write("probe_num_1 = 1 \n")
np.save(os.path.join(run_dir, "test_init_file.npy"), SOL_PRIM_IN_REACT)
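# Typical integration-test flow (sketch; the solver-driver step is an assumption,
# the actual tests construct it from the files written above):
#   gen_test_dir()
#   solution_domain_setup(run_dir=TEST_DIR)
#   ...  # build and run a PERFORM solution domain against TEST_DIR
#   del_test_dir()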
def rom_domain_setup(run_dir=TEST_DIR, method="galerkin", space_mapping="linear", var_mapping="conservative"):
from tensorflow.keras import Model, Input
from tensorflow.keras.layers import Dense
from tensorflow.keras.initializers import Identity
assert method in ["galerkin", "lspg", "mplsvt"]
assert space_mapping in ["linear", "autoencoder"]
assert var_mapping in ["conservative", "primitive"]
# presumably this has already been generated
inp_file = os.path.join(run_dir, PARAM_INPUTS)
with open(inp_file, "a") as f:
f.write("calc_rom = True \n")
# generate ROM input file
inp_file = os.path.join(run_dir, ROM_INPUTS)
model_dir = os.path.join(run_dir, "model_files")
os.mkdir(model_dir)
with open(inp_file, "w") as f:
f.write('rom_method = "' + method + '" \n')
f.write('var_mapping = "' + var_mapping + '" \n')
f.write('space_mapping = "' + space_mapping + '" \n')
f.write("num_models = 1 \n")
f.write("latent_dims = [8] \n")
f.write("model_var_idxs = [[0, 1, 2, 3]] \n")
f.write('model_dir = "' + model_dir + '" \n')
f.write('basis_files = ["spatial_modes.npy"] \n')
f.write('decoder_files = ["decoder.h5", ] \n')
f.write('encoder_files = ["encoder.h5", ] \n')
f.write('cent_prim = ["cent_prof_prim.npy"] \n')
f.write('cent_cons = ["cent_prof_cons.npy"] \n')
f.write('norm_sub_prim = ["norm_sub_prof_prim.npy"] \n')
f.write('norm_fac_prim = ["norm_fac_prof_prim.npy"] \n')
f.write('norm_sub_cons = ["norm_sub_prof_cons.npy"] \n')
f.write('norm_fac_cons = ["norm_fac_prof_cons.npy"] \n')
f.write("run_gpu = False \n")
f.write('ml_library = "tfkeras" \n')
f.write("decoder_isconv = False \n")
f.write("encoder_isconv = False \n")
# generate model files, standardization profiles
modes = np.reshape(np.eye(8), (4, 2, 8))
cent_prof = np.zeros((4, 2), dtype=REAL_TYPE)
norm_sub_prof = np.zeros((4, 2), dtype=REAL_TYPE)
norm_fac_prof = np.ones((4, 2), dtype=REAL_TYPE)
# TensorFlow model setup
input_layer = Input(shape=(8,), batch_size=None)
output_layer = Dense(8, activation="linear", use_bias=False, kernel_initializer=Identity)(input_layer)
tf_model = Model(input_layer, output_layer)
np.save(os.path.join(model_dir, "spatial_modes.npy"), modes)
np.save(os.path.join(model_dir, "cent_prof_prim.npy"), cent_prof)
np.save(os.path.join(model_dir, "cent_prof_cons.npy"), cent_prof)
np.save(os.path.join(model_dir, "norm_sub_prof_prim.npy"), norm_sub_prof)
np.save(os.path.join(model_dir, "norm_fac_prof_prim.npy"), norm_fac_prof)
np.save(os.path.join(model_dir, "norm_sub_prof_cons.npy"), norm_sub_prof)
np.save(os.path.join(model_dir, "norm_fac_prof_cons.npy"), norm_fac_prof)
tf_model.save(os.path.join(model_dir, "decoder.h5"), save_format="h5")
tf_model.save(os.path.join(model_dir, "encoder.h5"), save_format="h5")
[file: tests/integration_tests/constants.py | lang: Python | repo: cwentland0/perform]
#!/usr/bin/env python
"""
Author: Devansh Shukla
"""
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation, FFMpegWriter
import matplotlib.gridspec as gridspec
custom_rcparams = {
"axes.labelsize": 7,
"axes.titlesize": 8,
"axes.grid": True,
# Figure
"figure.autolayout": True,
"figure.titlesize": 9,
"figure.figsize": (10, 3),
# "figure.dpi": 150,
"savefig.format": "pdf",
"lines.linewidth": 1,
# Legend
"legend.fontsize": 8,
"legend.frameon": True,
# Ticks
"xtick.labelsize": 8,
"ytick.labelsize": 8,
"xtick.minor.visible": True,
"xtick.direction": "in",
"ytick.direction": "in",
"ytick.minor.visible": True,
}
mpl.rcParams.update(custom_rcparams)
# columns read from Particle1D.dat: t, x, vx
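# Expected layout of Particle1D.dat (whitespace-separated, '#' marks comments);
# the numbers below are purely illustrative:
#   0.00  0.000  1.000
#   0.01  0.010  0.998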
df = pd.read_csv("Particle1D.dat", engine="python", delimiter=" ", header=None, skipinitialspace=True, comment="#")
print(df)
time = df[0].values
pos_x = df[1].values
vel_x = df[2].values
gs = gridspec.GridSpec(1, 2, width_ratios=[2, 1], hspace=0)
fig = plt.figure()
ax1 = plt.subplot(gs[0, 0])
ax2 = plt.subplot(gs[0, 1])
plt.tight_layout()
line1, = ax1.plot([], [], 'o', lw=2, label="particle")
trace, = ax1.plot([], [], '-', lw=1, label="trace")
time_template = "time = %.2fs"
time_text = ax1.text(0.05, 0.8, '', transform=ax1.transAxes)
line_arrow = ax1.plot([], [], "-", color="C4", label=r"$v_x$")
patch = plt.Arrow(pos_x[0], 0, vel_x[0], 0, width=0.15, color="C4")
ax1.add_patch(patch)
line_vx, = ax2.plot([], [], '-', lw=2, label=r"$v_{x}(t)$")
ax2.legend()
line = [line1, line_vx, ]
ax1.set_xlim(left=pos_x.min()-1, right=pos_x.max()+1)
ax1.set_ylim(-2, 2)
ax1.vlines(pos_x.max(), -2, 2, "red", label=rf"$x={pos_x.max()}$")
ax1.vlines(pos_x.min(), -2, 2, "red", label=rf"$x={pos_x.min()}$")
ax1.set_xlabel(r"$X$", labelpad=-0.5)
ax1.set_ylabel(r"$Y$", labelpad=-0.5)
ax1.legend()
ax2.set_xlim(0, time[-1]+1)
ax2.set_ylim(-1.5, 1.5)
ax2.set_xlabel(r"$Time(s)$", labelpad=0)
ax2.set_ylabel(r"$v(m/s)$", labelpad=0)
def init():
line[0].set_data([], [])
line[1].set_data([], [])
trace.set_data([], [])
return line, trace
def animate(i):
global time, pos_x, vel_x
line[0].set_data([pos_x[i]], [0])
trace.set_data(pos_x[:i], np.zeros_like(pos_x[:i]))
time_text.set_text(time_template % (time[i]))
line[1].set_data(time[:i], vel_x[:i])
global ax1, patch
patch.remove()  # drop the previous velocity arrow before drawing the new one
patch = plt.Arrow(pos_x[i], 0, vel_x[i], 0, width=0.15, color="C4")
ax1.add_patch(patch)
return line, trace, time_text
def toggle_capture(*args, **kwargs):
global ani, capture_no
ani.pause()
plt.gcf().savefig(f"plots/1d_{capture_no}.pdf")
capture_no += 1
ani.resume()
capture_no = 0
ani = FuncAnimation(fig, animate, frames=len(time), interval=10, init_func=init, blit=False, repeat=False)
fig.canvas.mpl_connect('button_press_event', toggle_capture)
writer = FFMpegWriter(fps=10)
ani.save('animation.mp4', writer=writer)
plt.show()
[file: M6/AnimationParticle.py | lang: Python | repo: devanshshukla99/MP409-Computational-Physics-Lab]
import logging
import os
import pandas as pd
import numpy as np
import itertools as it
import xgboost as xgb
class XGB(object):
def __init__(self, obj):
self.master = obj
for key, val in vars(obj).items():
setattr(self, key, val)
base_for = "ACGT"
base_rev = "TGCA"
self.comp_tab = str.maketrans(base_for, base_rev)
def load_xgb_model(self):
logging.debug('Loading xgboost model')
bst = xgb.Booster({'nthread':self.threads})
bst.load_model(self.xgb)
self.bst = bst
# Load label dict here:
with open(self.typedict, 'r') as f:
rs = (ll.rstrip().split(':') for ll in f)
self.label_dict = {r[1]:r[0] for r in rs}
def generate_canonical_kmer(self):
logging.debug('Generating canonical {}mers'.format(self.kmer))
letters = ['A','C','G','T']
all_kmer = [''.join(k) for k in it.product(letters, repeat=self.kmer)]
all_kmer_rev = [x.translate(self.comp_tab)[::-1] for x in all_kmer]
can_kmer = list(it.compress(all_kmer_rev, [not kf < kr for kf,kr in zip(all_kmer, all_kmer_rev)]))
can_kmer.sort()
self.can_kmer = can_kmer
def count_kmer(self, seq):
kmer_d = {}
for i in range(len(seq) - self.kmer + 1):
kmer_for = seq[i:(i+self.kmer)]
kmer_rev = kmer_for.translate(self.comp_tab)[::-1]
if kmer_for < kmer_rev:
kmer = kmer_for
else:
kmer = kmer_rev
if kmer in kmer_d:
kmer_d[kmer] += 1
else:
kmer_d[kmer] = 1
return kmer_d
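# e.g. with self.kmer == 2, count_kmer("ACGT") returns {"AC": 2, "CG": 1},
# because "GT" is counted under its reverse complement "AC"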
def xgb_run(self):
if not self.redo:
# Get repeats
self.repeats = [x.cons for x in self.crisprs]
# Load crispr table
df = pd.read_csv(self.out+'crisprs_all.tab', sep='\t')
# Check
if len(df) > 0:
self.any_crispr = True
else:
logging.info('No CRISPRs found.')
os.remove(self.out+'crisprs_all.tab')
# Predict
if self.any_crispr:
self.predict_repeats()
# Add to file
df['Prediction'] = self.z_type
df['Subtype'] = self.z_type
df['Subtype_probability'] = self.z_max
df.loc[df.Subtype_probability < self.pred_prob, 'Prediction'] = 'Unknown'
df['Subtype_probability'] = df['Subtype_probability'].round(3)
# We trust arrays with a known (predictable) repeat sequence
df.loc[df.Subtype_probability >= 0.9, 'Trusted'] = True
df.to_csv(self.out+'crisprs_all.tab', sep='\t', index=False)
def predict_repeats(self):
logging.info('Predicting subtype of CRISPR repeats')
# Prepare
self.load_xgb_model()
self.generate_canonical_kmer()
self.repeats = [x.upper() for x in self.repeats]
# Count kmers (first index is a to ensure all kmers are in the df)
z_df = pd.DataFrame([dict(zip(self.can_kmer, np.zeros(len(self.can_kmer))))] + [self.count_kmer(x) for x in self.repeats]).fillna(0)
z_df = z_df.reindex(sorted(z_df.columns), axis=1)
# Predict
self.z_pred = self.bst.predict(xgb.DMatrix(z_df), ntree_limit=int(self.bst.attr('best_iteration')))
# Get type and max probability
self.z_best = [x.argmax() for x in self.z_pred][1:len(self.z_pred)]
self.z_max = [x.max() for x in self.z_pred][1:len(self.z_pred)]
# Convert to type string
self.z_type = [self.label_dict[str(x)] for x in self.z_best]
def print_xgb(self):
for i in range(len(self.repeats)):
print('{}\t{}\t{}'.format(self.repeats[i],
self.z_type[i],
self.z_max[i]))
[file: cctyper/xgb.py | lang: Python | repo: ElementGenomicsInc/CRISPRCasTyper]
[STATEMENT]
lemma binsert_absorb[simp]: "binsert a (binsert a x) = binsert a x"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. binsert a (binsert a x) = binsert a x
[PROOF STEP]
by transfer simp | {"llama_tokens": 80, "file": null, "length": 1} |
###########################################################
###########################################################
## EMode - Python interface, by EMode Photonix LLC
###########################################################
## Copyright (c) 2021 EMode Photonix LLC
###########################################################
## NOTES:
## - strings are UTF-8
## - numbers are doubles with IEEE 754 binary64
###########################################################
###########################################################
import os, socket, struct, pickle, time, atexit
from subprocess import Popen
import numpy as np
import scipy.io as sio
class EMode:
def __init__(self, sim='emode', open_existing=False, new_name=False, priority='pN', roaming=False, verbose=False):
'''
Initialize defaults and connect to EMode.
'''
atexit.register(self.close)
try:
sim = str(sim)
except:
raise TypeError("input parameter 'sim' must be a string")
return
try:
priority = str(priority)
except:
raise TypeError("input parameter 'priority' must be a string")
return
self.dsim = sim
self.ext = ".eph"
self.exit_flag = False
self.DL = 2048
self.HOST = '127.0.0.1'
self.LHOST = 'lm.emodephotonix.com'
self.LPORT = '64000'
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.bind((self.HOST, 0))
self.PORT_SERVER = int(self.s.getsockname()[1])
self.s.listen(1)
cmd_lst = ['EMode.exe', self.LHOST, self.LPORT, str(self.PORT_SERVER)]
if (verbose == True):
cmd_lst.append('-v')
if (priority != 'pN'):
priority = priority.strip('-')
cmd_lst.append('-'+priority)
if roaming:
cmd_lst.append('-r')
proc = Popen(cmd_lst, stderr=None)
self.conn, self.addr = self.s.accept()
time.sleep(0.2) # wait for EMode to recv
self.conn.sendall(b"connected with Python!")
if (open_existing):
RV = self.call("EM_open", sim=sim, new_name=new_name)
else:
RV = self.call("EM_init", sim=sim)
self.dsim = RV[len("sim:"):]
return
def call(self, function, **kwargs):
'''
Send a command to EMode.
'''
sendset = []
if (isinstance(function, str)):
sendset.append(function.encode('utf-8'))
else:
raise TypeError("input parameter 'function' must be a string")
for kw in kwargs:
sendset.append(kw.encode('utf-8'))
if (isinstance(kwargs[kw], np.ndarray)):
if (len(kwargs[kw].shape) == 1):
kwargs[kw] = list(kwargs[kw])
if (isinstance(kwargs[kw], str)):
if ((len(kwargs[kw]) % 8) == 0):
kwargs[kw] = ' '+kwargs[kw]
sendset.append(kwargs[kw].encode('utf-8'))
elif (isinstance(kwargs[kw], list)):
str_check = [True for kk in kwargs[kw] if isinstance(kk, str)]
if (True in str_check): raise TypeError("list inputs must not contain strings")
sendset.append(struct.pack('@%dd' % int(len(kwargs[kw])), *kwargs[kw]))
elif (isinstance(kwargs[kw], (int, float, np.integer, np.floating))):
sendset.append(struct.pack('@1d', kwargs[kw]))
else:
raise TypeError("type not recognized in '**kwargs' as str, list, integer, or float")
if ('sim' not in kwargs):
sendset.append('sim'.encode('utf-8'))
sendset.append(self.dsim.encode('utf-8'))
sendstr = b':::::'.join(sendset)
try:
self.conn.sendall(sendstr)
RV = self.conn.recv(self.DL)
except:
# Exited due to license checkout
self.conn.close()
self.exit_flag = True
if (self.exit_flag):
raise RuntimeError("License checkout error!")
return RV.decode("utf-8")
def get(self, variable):
'''
Return data from simulation file.
'''
if (not isinstance(variable, str)):
raise TypeError("input parameter 'variable' must be a string")
fl = open(self.dsim+self.ext, 'rb')
f = pickle.load(fl)
fl.close()
if (variable in list(f.keys())):
data = f[variable]
else:
print("Data does not exist.")
return
return data
def inspect(self):
'''
Return list of keys from available data in simulation file.
'''
fl = open(self.dsim+self.ext, 'rb')
f = pickle.load(fl)
fl.close()
fkeys = list(f.keys())
fkeys.remove("EMode_simulation_file")
return fkeys
def close(self, **kwargs):
'''
Send saving options to EMode and close the connection.
'''
if (self.conn.fileno() == -1): return
self.call("EM_close", **kwargs)
self.conn.sendall(b"exit")
self.conn.close()
print("Exited EMode")
return
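# Example session (sketch): any backend function can be dispatched through call();
# the function and keyword names below, other than the EM_* calls used above,
# are placeholders rather than documented EMode API.
#   em = EMode(sim='demo', verbose=True)
#   em.call('EM_...', some_setting=[1.0, 2.0])
#   data = em.get('some_saved_variable')
#   em.close()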
def open_file(sim):
'''
Opens an EMode simulation file with either .eph or .mat extension.
'''
ext = '.eph'
mat = '.mat'
found = False
for file in os.listdir():
if ((file == sim+ext) or ((file == sim) and (sim.endswith(ext)))):
found = True
if (sim.endswith(ext)):
sim = sim.replace(ext,'')
fl = open(sim+ext, 'rb')
f = pickle.load(fl)
fl.close()
elif ((file == sim+mat) or ((file == sim) and (sim.endswith(mat)))):
found = True
f = sio.loadmat(sim+mat)
if (not found):
print("ERROR: file not found!")
return "ERROR"
return f
def get(variable, sim='emode'):
'''
Return data from simulation file.
'''
if (not isinstance(variable, str)):
raise TypeError("input parameter 'variable' must be a string")
if (not isinstance(sim, str)):
raise TypeError("input parameter 'sim' must be a string")
f = open_file(sim=sim)
if (variable in list(f.keys())):
data = f[variable]
else:
print("Data does not exist.")
return
return data
def inspect(sim='emode'):
'''
Return list of keys from available data in simulation file.
'''
if (not isinstance(sim, str)):
raise TypeError("input parameter 'sim' must be a string")
f = open_file(sim=sim)
fkeys = list(f.keys())
fkeys.remove("EMode_simulation_file")
return fkeys
[file: emodeconnection/emodeconnection.py | lang: Python | repo: emode-photonix/emodeconnection]
# return an Int when the value fits machine precision, otherwise keep the full BigInt
int_or_bigint(x) = (y=mod(x,Int); x==y ? convert(Int,x) : convert(BigInt,x))
mutable struct UnpickleState
stack :: Stack
proto :: Int
pos :: Int
memo :: Memo
end
UnpickleState() = UnpickleState(Stack(), HIGHEST_PROTOCOL, 1, Memo())
function read_opcode(io::IO, state::UnpickleState)
ans = read(io, OpCode)
state.pos += sizeof(OpCode)
ans
end
function read_prim_l(io::IO, state::UnpickleState, ::Type{T}) where {T}
ans = ltoh(read(io, T))
state.pos += sizeof(T)
ans
end
function read_prim_b(io::IO, state::UnpickleState, ::Type{T}) where {T}
ans = ntoh(read(io, T))
state.pos += sizeof(T)
ans
end
read_u1(io::IO, state::UnpickleState) = read_prim_l(io, state, UInt8)
read_u2(io::IO, state::UnpickleState) = read_prim_l(io, state, UInt16)
read_u4(io::IO, state::UnpickleState) = read_prim_l(io, state, UInt32)
read_u8(io::IO, state::UnpickleState) = read_prim_l(io, state, UInt64)
read_s4(io::IO, state::UnpickleState) = read_prim_l(io, state, Int32)
read_f8(io::IO, state::UnpickleState) = read_prim_b(io, state, Float64)
function read_bytes(io::IO, state::UnpickleState, sz::Integer)
ans = read(io, sz)
state.pos += length(ans)
length(ans) == sz || error("unexpected end of file")
ans
end
function read_line(io::IO, state::UnpickleState)
ans = readuntil(io, '\n', keep=false)
state.pos += sizeof(ans) + 1
ans
end
function read_stringnl_noescape(io::IO, state::UnpickleState)
read_line(io, state)
end
function read_decimalnl_short(io::IO, state::UnpickleState)
line = read_line(io, state)
if line == "00"
return false
elseif line == "01"
return true
end
return int_or_bigint(parse(BigInt, line))
end
function read_floatnl(io::IO, state::UnpickleState)
line = read_line(io, state)
return parse(Float64, line)
end
function unpickle(io::IO)
state = UnpickleState()
# optional first PROTO opcode
op = read_opcode(io, state)
if op == OP_PROTO
state.proto = Int(read_u1(io, state))
stop = false
else
stop = doop(op, state, io)
end
# remaining opcodes
while !stop
op = read_opcode(io, state)
stop = doop(op, state, io)
end
# done
len = length(state.stack)
if len == 1
return pop!(state.stack)
else
error("unexpected STOP")
end
end
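# Example (sketch): read an object written by Python's pickle module; primitives
# map to native Julia values, while containers and class instances map to the
# Py* wrapper types used above (PyList, PyDict, PyTuple, PyGlobal, ...).
#   obj = open(unpickle, "data.pkl")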
function doop(op::OpCode, state::UnpickleState, io::IO)
if op == OP_MARK
mark!(state.stack)
elseif op == OP_STOP
return true
elseif op == OP_POP
pop!(state.stack)
elseif op == OP_POP_MARK
poptomark!(state.stack)
elseif op == OP_DUP
push!(state.stack, top(state.stack))
elseif op == OP_FLOAT
val = read_floatnl(io, state)
push!(state.stack, val)
elseif op == OP_INT
val = read_decimalnl_short(io, state)
push!(state.stack, val)
elseif op == OP_BININT
val = int_or_bigint(read_s4(io, state))
push!(state.stack, val)
elseif op == OP_BININT1
val = int_or_bigint(read_u1(io, state))
push!(state.stack, val)
elseif op == OP_LONG
error("opcode not implemented: $op")
elseif op == OP_BININT2
val = int_or_bigint(read_u2(io, state))
push!(state.stack, val)
elseif op == OP_NONE
val = nothing
push!(state.stack, val)
elseif op == OP_PERSID
error("opcode not implemented: $op")
elseif op == OP_BINPERSID
error("opcode not implemented: $op")
elseif op == OP_REDUCE
args = pop!(state.stack)::PyTuple
func = pop!(state.stack)
val = PyFuncCall(func, args.values)
push!(state.stack, val)
elseif op == OP_STRING
error("opcode not implemented: $op")
elseif op == OP_BINSTRING
error("opcode not implemented: $op")
elseif op == OP_SHORT_BINSTRING
error("opcode not implemented: $op")
elseif op == OP_UNICODE
error("opcode not implemented: $op")
elseif op == OP_BINUNICODE
sz = read_u4(io, state)
val = String(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_APPEND
x = pop!(state.stack)
list = top(state.stack)::PyList
push!(list.values, x)
elseif op == OP_BUILD
arg = pop!(state.stack)
obj = top(state.stack)
pysetstate!(obj, arg)
elseif op == OP_GLOBAL
mod = read_stringnl_noescape(io, state)
attr = read_stringnl_noescape(io, state)
val = PyGlobal(mod, attr)
push!(state.stack, val)
elseif op == OP_DICT
kvs = poptomark!(state.stack)
iseven(length(kvs)) || error("odd number of keys and values")
val = PyDict(Pair{Any,Any}[Pair{Any,Any}(kvs[i], kvs[i+1]) for i in 1:2:length(kvs)])
push!(state.stack, val)
elseif op == OP_EMPTY_DICT
val = PyDict()
push!(state.stack, val)
elseif op == OP_APPENDS
xs = poptomark!(state.stack)
list = top(state.stack)::PyList
append!(list.values, xs)
elseif op == OP_GET
error("opcode not implemented: $op")
elseif op == OP_BINGET
idx = read_u1(io, state)
val = state.memo[idx]
push!(state.stack, val)
elseif op == OP_INST
error("opcode not implemented: $op")
elseif op == OP_LONG_BINGET
idx = read_u4(io, state)
val = state.memo[idx]
push!(state.stack, val)
elseif op == OP_LIST
val = PyList(poptomark!(state.stack))
push!(state.stack, val)
elseif op == OP_EMPTY_LIST
val = PyList()
push!(state.stack, val)
elseif op == OP_OBJ
error("opcode not implemented: $op")
elseif op == OP_PUT
idx = read_decimalnl_short(io, state)
state.memo[idx] = top(state.stack)
elseif op == OP_BINPUT
idx = read_u1(io, state)
state.memo[idx] = top(state.stack)
elseif op == OP_LONG_BINPUT
idx = read_u4(io, state)
state.memo[idx] = top(state.stack)
elseif op == OP_SETITEM
v = pop!(state.stack)
k = pop!(state.stack)
dict = top(state.stack)::PyDict
push!(dict.items, Pair{Any,Any}(k, v))
elseif op == OP_TUPLE
val = PyTuple(poptomark!(state.stack))
push!(state.stack, val)
elseif op == OP_EMPTY_TUPLE
val = PyTuple()
push!(state.stack, val)
elseif op == OP_SETITEMS
kvs = poptomark!(state.stack)
iseven(length(kvs)) || error("odd number of keys and values")
dict = top(state.stack)::PyDict
for i in 1:2:length(kvs)
push!(dict.items, Pair{Any,Any}(kvs[i], kvs[i+1]))
end
elseif op == OP_BINFLOAT
val = read_f8(io, state)
push!(state.stack, val)
elseif op == OP_PROTO
error("unexpected op: $op")
elseif op == OP_NEWOBJ
args = pop!(state.stack)::PyTuple
cls = pop!(state.stack)
val = PyNewObj(cls, args.values)
push!(state.stack, val)
elseif op == OP_EXT1
error("opcode not implemented: $op")
elseif op == OP_EXT2
error("opcode not implemented: $op")
elseif op == OP_EXT4
error("opcode not implemented: $op")
elseif op == OP_TUPLE1
x1 = pop!(state.stack)
val = PyTuple(Any[x1])
push!(state.stack, val)
elseif op == OP_TUPLE2
x2 = pop!(state.stack)
x1 = pop!(state.stack)
val = PyTuple(Any[x1, x2])
push!(state.stack, val)
elseif op == OP_TUPLE3
x3 = pop!(state.stack)
x2 = pop!(state.stack)
x1 = pop!(state.stack)
val = PyTuple(Any[x1, x2, x3])
push!(state.stack, val)
elseif op == OP_NEWTRUE
val = true
push!(state.stack, val)
elseif op == OP_NEWFALSE
val = false
push!(state.stack, val)
elseif op == OP_LONG1
error("opcode not implemented: $op")
elseif op == OP_LONG4
error("opcode not implemented: $op")
elseif op == OP_BINBYTES
sz = read_u4(io, state)
val = PyBytes(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_SHORT_BINBYTES
sz = read_u1(io, state)
val = PyBytes(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_SHORT_BINUNICODE
sz = read_u1(io, state)
val = String(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_BINUNICODE8
sz = read_u8(io, state)
val = String(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_BINBYTES8
sz = read_u8(io, state)
val = PyBytes(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_EMPTY_SET
val = PySet()
push!(state.stack, val)
elseif op == OP_ADDITEMS
xs = poptomark!(state.stack)
set = top(state.stack)::PySet
append!(set.values, xs)
elseif op == OP_FROZENSET
val = PyFrozenSet(poptomark!(state.stack))
push!(state.stack, val)
elseif op == OP_NEWOBJ_EX
kwargs = pop!(state.stack)::PyDict
args = pop!(state.stack)::PyTuple
cls = pop!(state.stack)
val = PyNewObj(cls, args.values, kwargs.items)
push!(state.stack, val)
elseif op == OP_STACK_GLOBAL
attr = pop!(state.stack)::String
mod = pop!(state.stack)::String
val = PyGlobal(mod, attr)
push!(state.stack, val)
elseif op == OP_MEMOIZE
push!(state.memo, top(state.stack))
elseif op == OP_FRAME
# TODO: we could reuse `frameio` to avoid some allocations
sz = read_u8(io, state)
pos = state.pos
frameio = IOBuffer(read_bytes(io, state, sz))
while !eof(frameio)
op = read_opcode(frameio, state)
stop = doop(op, state, frameio)
stop && return true
end
state.pos = pos + sz
elseif op == OP_BYTEARRAY8
sz = read_u8(io, state)
val = PyByteArray(read_bytes(io, state, sz))
push!(state.stack, val)
elseif op == OP_NEXT_BUFFER
error("opcode not implemented: $op")
elseif op == OP_READONLY_BUFFER
error("opcode not implemented: $op")
else
error("opcode not implemented: $op")
end
return false
end
| {"hexsha": "0b6c729588ce151b21f2a65393042f3224019607", "size": 10391, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/PickleIO/unpickle.jl", "max_stars_repo_name": "cjdoris/PythonIO.jl", "max_stars_repo_head_hexsha": "cd2adb43c41ea55058beadb1ff2d9c0c57569f8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-11T02:18:51.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-11T02:18:51.000Z", "max_issues_repo_path": "src/PickleIO/unpickle.jl", "max_issues_repo_name": "cjdoris/PythonIO.jl", "max_issues_repo_head_hexsha": "cd2adb43c41ea55058beadb1ff2d9c0c57569f8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/PickleIO/unpickle.jl", "max_forks_repo_name": "cjdoris/PythonIO.jl", "max_forks_repo_head_hexsha": "cd2adb43c41ea55058beadb1ff2d9c0c57569f8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.4402035623, "max_line_length": 93, "alphanum_fraction": 0.5901260706, "num_tokens": 2841} |
import math
from typing import List
from typing import Sequence
from typing import Tuple
import matplotlib.axes
import matplotlib.pyplot as plt
import numpy as np
def prepare_axes(segments: List[str], columns_num: int, figsize: Tuple[int, int]) -> Sequence[matplotlib.axes.Axes]:
"""Prepare axes according to segments, figure size and number of columns."""
segments_number = len(segments)
columns_num = min(columns_num, len(segments))
rows_num = math.ceil(segments_number / columns_num)
figsize = (figsize[0] * columns_num, figsize[1] * rows_num)
_, ax = plt.subplots(rows_num, columns_num, figsize=figsize, constrained_layout=True)
ax = np.array([ax]).ravel()
return ax
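# Hedged usage sketch (segment names are illustrative, not from the library):
# asking for 5 segments with at most 2 columns yields ceil(5 / 2) = 3 rows,
# i.e. a 3x2 grid returned as a flat array of 6 axes.
# axes = prepare_axes(["a", "b", "c", "d", "e"], columns_num=2, figsize=(5, 3))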
| {"hexsha": "2684d74e46a6bc59c5dd24f2275e119dbf5dd10c", "size": 707, "ext": "py", "lang": "Python", "max_stars_repo_path": "etna/analysis/utils.py", "max_stars_repo_name": "martins0n/etna", "max_stars_repo_head_hexsha": "51e9cec5183da2499ca247b0e2db215507246ceb", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 326, "max_stars_repo_stars_event_min_datetime": "2021-11-18T15:30:50.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T09:44:15.000Z", "max_issues_repo_path": "etna/analysis/utils.py", "max_issues_repo_name": "martins0n/etna", "max_issues_repo_head_hexsha": "51e9cec5183da2499ca247b0e2db215507246ceb", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 305, "max_issues_repo_issues_event_min_datetime": "2021-11-17T10:28:31.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T18:05:03.000Z", "max_forks_repo_path": "etna/analysis/utils.py", "max_forks_repo_name": "martins0n/etna", "max_forks_repo_head_hexsha": "51e9cec5183da2499ca247b0e2db215507246ceb", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2021-11-21T12:10:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T22:55:06.000Z", "avg_line_length": 33.6666666667, "max_line_length": 116, "alphanum_fraction": 0.738330976, "include": true, "reason": "import numpy", "num_tokens": 167} |
#!/usr/bin/python
#######################################################################
import torch
import torch.nn.functional as F
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
import pdb
from torch.autograd import Variable
#######################################################################
# def barlow_loss(B,C,lamda):
# Batch_size,_, Dim = B.size()
# batch_correlation=torch.matmul(C,B.transpose(1,2))/Batch_size
# I=torch.eye(Dim)
# IR=torch.cat([I.unsqueeze(0)]*Batch_size,dim=0)
# batch_correlation_loss=(batch_correlation-IR).pow(2)
# #breakpoint()
# off_diag = lamda*(batch_correlation_loss*(1-IR)).sum()
# diag = (IR*batch_correlation_loss).sum()
# similarity = off_diag + diag
# print('off_diag',off_diag,'diag',diag)
# #print(diag<off_diag/20)
# return similarity
#######################################################################
#==========================================================================
def cal_performance(pred, gold,IGNORE_ID,normalize_length=False,smoothing=0.0):
"""Calculate cross entropy loss, apply label smoothing if needed.
Args: pred: N x T x C, score before softmax ;;;; gold: N x T """
pred=pred.unsqueeze(1)
gold=gold.unsqueeze(1)
#breakpoint()
pred = pred.view(-1, pred.size(2))
gold = gold.contiguous().view(-1)
loss = cal_loss(pred, gold,IGNORE_ID,normalize_length,smoothing)
pred = pred.max(1)[1]
non_pad_mask = gold.ne(IGNORE_ID)
n_correct = pred.eq(gold)
n_correct = n_correct.masked_select(non_pad_mask).sum().item()
n_correct=n_correct/float(non_pad_mask.sum())
n_correct=1.0-n_correct
return loss, n_correct
#===============================================
#===============================================
def cal_loss(pred, gold,IGNORE_ID,normalize_length,smoothing):
"""Calculate cross entropy loss, apply label smoothing if needed. """
normalize_length=True
if smoothing > 0.0:
eps = smoothing
n_class = pred.size(1)
#breakpoint()
# Generate one-hot matrix: N x C.
# Only label position is 1 and all other positions are 0
# gold include -1 value (IGNORE_ID) and this will lead to assert error
gold_for_scatter = gold.ne(IGNORE_ID).long() * gold
one_hot = torch.zeros_like(pred).scatter(1, gold_for_scatter.view(-1, 1), 1)
one_hot = one_hot * (1 - eps) + (1 - one_hot) * eps / n_class
#breakpoint()
log_prb = F.log_softmax(pred, dim=1)
non_pad_mask = gold.ne(IGNORE_ID)
n_word = non_pad_mask.sum().item()
loss = -(one_hot * log_prb).sum(dim=1)
loss = loss.masked_select(non_pad_mask).sum() / n_word
print('CE_loss',loss)
else:
        loss = F.cross_entropy(pred, gold,
                               ignore_index=IGNORE_ID,
                               reduction='mean')  # 'elementwise_mean' is deprecated in recent PyTorch
return loss
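# Hedged worked example of the smoothing above (illustrative numbers): with
# eps = 0.1 and n_class = 5 the target row holds (1 - eps) = 0.9 at the gold
# index and eps / n_class = 0.02 elsewhere, so a row sums to
# 0.9 + 4 * 0.02 = 0.98 (this variant does not renormalize the row to exactly 1).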
#######################################################################
def barlow_loss_VICReg(z_a,z_b,lamda):
"""
x_a, x_b = augment(x)
#compute representations
z_a = f(x_a) # N x D
z_b = f(x_b) # N x D
# invariance loss
sim_loss = mse_loss(z_a, z_b)
# variance loss
std_z_a = torch.sqrt(z_a.var(dim=0) + 1e-04)
std_z_b = torch.sqrt(z_b.var(dim=0) + 1e-04)
std_loss = torch.mean(relu(1 - std_z_a))
std_loss = std_loss + torch.mean(relu(1 - std_z_b))
# covariance loss
z_a = z_a - z_a.mean(dim=0)
z_b = z_b - z_b.mean(dim=0)
cov_z_a = (z_a.T @ z_a) / (N - 1)
cov_z_b = (z_b.T @ z_b) / (N - 1)
cov_loss = off_diagonal(cov_z_a).pow_(2).sum() / D
cov_loss = cov_loss + off_diagonal(cov_z_b).pow_(2).sum() / D
# loss
loss = lambda * sim_loss + mu * std_loss + nu * cov_loss
# optimization step
loss.backward()
optimizer.step()
"""
lamda,mu,nu=0.1,0.1,0.1
N,D = z_a.size()
mse_loss = torch.nn.MSELoss(reduction='none')
sim_loss = mse_loss(z_a, z_b)
###mean keeps loss independent of proj_dim
sim_loss = sim_loss.mean(dim=1)
std_z_a = torch.sqrt(torch.var(z_a,dim=0) + 1e-04)
std_z_b = torch.sqrt(torch.var(z_b,dim=0) + 1e-04)
std_loss = torch.mean(F.relu(1 - std_z_a)) + torch.mean(F.relu(1 - std_z_b))
z_a = z_a -torch.mean(z_a, dim=0, keepdim=True)
z_b = z_b -torch.mean(z_b, dim=0, keepdim=True)
cov_z_a = torch.mm(z_a.transpose(0,1),z_a)/ (N - 1)
cov_z_b = torch.mm(z_b.transpose(0,1),z_b)/ (N - 1)
IR=torch.cat([torch.eye(D).unsqueeze(0)]*N,dim=0)
offdiag_a = ((cov_z_a*(1-IR))**2).sum(dim=1) / D
offdiag_b = ((cov_z_b*(1-IR))**2).sum(dim=1) / D
cov_loss = offdiag_a + offdiag_b
cov_loss = cov_loss.sum(dim=1)
loss = lamda * sim_loss + mu * std_loss + nu * cov_loss
loss = loss.sum()
#print('loss',loss,',sim_loss,',sim_loss,'std_loss', std_loss,'cov_loss', cov_loss)
return loss
#######################################################################
class Barlow_CE_Loss(nn.Module):
def __init__(self,input_dim,proj_dim):
super(Barlow_CE_Loss,self).__init__()
self.Linear_proj=nn.Linear(input_dim,proj_dim)
self.barlow_loss=barlow_loss_VICReg
self.cal_performance=cal_performance
def forward(self,B,C,D):
z_a = self.Linear_proj(B)
z_b = self.Linear_proj(C)
Bar_los = self.barlow_loss(z_a,z_b,1)
self.cal_performance(B,D,IGNORE_ID=1024,normalize_length=False,smoothing=0.1)
print(Bar_los)
#######################################################################
loss=Barlow_CE_Loss(1024,200)
for i in range(1,100):
B=torch.randn(2,1024)
C=torch.randn(2,1024)
D=torch.randint(low=0, high=1024,size=(2,1))
# C=-1*B
#barlow_loss(B,C,1)
#barlow_loss_VICReg(B,C,1)
loss(B,C,D)
| {"hexsha": "4e13d914807f15931122508d86dd693981f5f080", "size": 6259, "ext": "py", "lang": "Python", "max_stars_repo_path": "ASR_TransV1/Barlow_Twins.py", "max_stars_repo_name": "BUTSpeechFIT/ASR_Transformer", "max_stars_repo_head_hexsha": "814f720aa8265e9a377869f93dc65b251338e985", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-10-25T00:21:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-25T00:21:40.000Z", "max_issues_repo_path": "ASR_TransV1/Barlow_Twins.py", "max_issues_repo_name": "BUTSpeechFIT/ASR_Transformer", "max_issues_repo_head_hexsha": "814f720aa8265e9a377869f93dc65b251338e985", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ASR_TransV1/Barlow_Twins.py", "max_forks_repo_name": "BUTSpeechFIT/ASR_Transformer", "max_forks_repo_head_hexsha": "814f720aa8265e9a377869f93dc65b251338e985", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-08T10:32:55.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-08T10:32:55.000Z", "avg_line_length": 36.1791907514, "max_line_length": 93, "alphanum_fraction": 0.5363476594, "include": true, "reason": "import numpy", "num_tokens": 1659} |
##
# This software was developed and / or modified by Raytheon Company,
# pursuant to Contract DG133W-05-CQ-1067 with the US Government.
#
# U.S. EXPORT CONTROLLED TECHNICAL DATA
# This software product contains export-restricted data whose
# export/transfer/disclosure is restricted by U.S. law. Dissemination
# to non-U.S. persons whether in the United States or abroad requires
# an export license or other authorization.
#
# Contractor Name: Raytheon Company
# Contractor Address: 6825 Pine Street, Suite 340
# Mail Stop B8
# Omaha, NE 68106
# 402.291.0100
#
# See the AWIPS II Master Rights File ("Master Rights File.pdf") for
# further licensing information.
##
# ----------------------------------------------------------------------------
# This software is in the public domain, furnished "as is", without technical
# support, and with no warranty, express or implied, as to its usefulness for
# any purpose.
#
# RHTool
#
# Author:
# ----------------------------------------------------------------------------
##
# This is an absolute override file, indicating that a higher priority version
# of the file will completely replace a lower priority version of the file.
##
ToolType = "numeric"
WeatherElementEdited = "RH"
from numpy import *
import SmartScript
class Tool (SmartScript.SmartScript):
def __init__(self, dbss):
SmartScript.SmartScript.__init__(self, dbss)
def execute(self, T, Td):
"This smart tool uses temp and dew pt to derive RH"
# Determine new value
Tc = .556 * (T - 32.0)
Tdc = .556 * (Td - 32.0)
Vt = 6.11 * pow(10,(Tc * 7.5 / (Tc + 237.3)))
Vd = 6.11 * pow(10,(Tdc * 7.5 / (Tdc + 237.3)))
RH = (Vd / Vt) * 100.0
# Return the new value
return RH
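# Hedged worked example (values are assumptions, not from AWIPS data): for
# T = 70 F and Td = 50 F the formula above gives Tc ~ 21.1 C, Tdc ~ 10.0 C,
# saturation vapor pressure Vt ~ 25.1 hPa, actual vapor pressure Vd ~ 12.3 hPa,
# hence RH ~ 49%, consistent with standard psychrometric tables.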
| {"hexsha": "29100997ad26392fdf9922359e00f2fd5bf26d3e", "size": 1871, "ext": "py", "lang": "Python", "max_stars_repo_path": "cave/com.raytheon.viz.gfe/localization/gfe/userPython/smartTools/RHTool.py", "max_stars_repo_name": "srcarter3/awips2", "max_stars_repo_head_hexsha": "37f31f5e88516b9fd576eaa49d43bfb762e1d174", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cave/com.raytheon.viz.gfe/localization/gfe/userPython/smartTools/RHTool.py", "max_issues_repo_name": "srcarter3/awips2", "max_issues_repo_head_hexsha": "37f31f5e88516b9fd576eaa49d43bfb762e1d174", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cave/com.raytheon.viz.gfe/localization/gfe/userPython/smartTools/RHTool.py", "max_forks_repo_name": "srcarter3/awips2", "max_forks_repo_head_hexsha": "37f31f5e88516b9fd576eaa49d43bfb762e1d174", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-10-30T00:03:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-30T00:03:05.000Z", "avg_line_length": 30.1774193548, "max_line_length": 78, "alphanum_fraction": 0.5900587921, "include": true, "reason": "from numpy", "num_tokens": 464} |
# @recipe function f(e::Event;coloring=[], labeling=[],show_pulses=true)
# if show_pulses==true
# n_wps = size(e.weighting_potentials,1)
# width = 800
# length = width+n_wps*width/2
# size --> (width,length)
# myheights = [width/length]
# for i in 1:n_wps
# push!(myheights,(width/2)/length)
# end
# layout --> (n_wps+1,1) #grid(n_wps+1,1,heights=myheights)
# @series begin
# subplot := 1
# coloring --> coloring
# labeling --> labeling
# e.detector
# end
# for itr in 1:e.n_sites
# @series begin
# itr == 1 ? showlabel --> true : showlabel --> false
# subplot := 1
# myscale := 1/e.detector.geometry_unit_factor
# e.trajectories_e[itr]
# end
# @series begin
# itr == 1 ? showlabel --> true : showlabel --> false
# subplot := 1
# myscale := 1/e.detector.geometry_unit_factor
# e.trajectories_h[itr]
# end
# end
# for iwp in 1:n_wps
# @series begin
# subplot := 1+iwp
# e.pulses[iwp]
# end
# end
# else
# width,length = 800, 800
# size --> (width,length)
# @series begin
# subplot := 1
# coloring --> coloring
# labeling --> labeling
# e.detector
# end
# for itr in 1:e.n_sites
# @series begin
# itr == 1 ? showlabel --> true : showlabel --> false
# subplot := 1
# myscale := 1/e.detector.geometry_unit_factor
# e.trajectories_e[itr]
# end
# @series begin
# itr == 1 ? showlabel --> true : showlabel --> false
# subplot := 1
# myscale := 1/e.detector.geometry_unit_factor
# e.trajectories_h[itr]
# end
# end
# end
# end
| {"hexsha": "30d82e4481f9f34dabf4631c0c2d31a69d876dcb", "size": 2116, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Event/plot_recipes.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_stars_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2022-03-21T03:32:10.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T03:32:15.000Z", "max_issues_repo_path": "src/Event/plot_recipes.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_issues_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Event/plot_recipes.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/SolidStateDetectors.jl-71e43887-2bd9-5f77-aebd-47f656f0a3f0", "max_forks_repo_head_hexsha": "9c2c535181052a8e801650f34286b64696397657", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0606060606, "max_line_length": 72, "alphanum_fraction": 0.4428166352, "num_tokens": 549} |
using LightGraphs
using SparseArrays
import Base: show, eltype, copy
import LightGraphs: nv, has_edge, has_vertex, add_edge!, rem_edge!, rem_vertex!,
rem_vertices!, add_vertex!, add_vertices!, outneighbors, inneighbors, vertices, edges,
adjacency_matrix, src, dst, nv, edgetype
export AbstractMultigraph
# export multype
"""
AbstractMultigraph{T}<:AbstractGraph{T}
An abstract type representing a multigraph.
"""
abstract type AbstractMultigraph{T<:Integer} <:AbstractGraph{T} end
function copy(mg::AbstractMultigraph{T}) where {T} end
function show(io::IO, mg::AbstractMultigraph{T}) where {T}
dir = is_directed(mg) ? "directed" : "undirected"
print(io, "{$(nv(mg)), $(ne(mg))} $(dir) $(T) multigraph")
end
eltype(mg::AbstractMultigraph{T}) where {T<:Integer} = T
multype(mg::AbstractMultigraph) = Int
edgetype(mg::AbstractMultigraph) = MultipleEdge{eltype(mg), multype(mg)}
function nv(mg::AbstractMultigraph) end
function vertices(mg::AbstractMultigraph) end
function adjacency_matrix(mg::AbstractMultigraph) end
function has_edge(mg::AbstractMultigraph, e::AbstractMultipleEdge)
s = src(e)
d = dst(e)
if has_vertex(mg, s) && has_vertex(mg, d)
return mul(mg, s, d) >= mul(e)
end
return false
end
has_edge(mg::AbstractMultigraph, t) = has_edge(mg, MultipleEdge(t))
add_edge!(mg::AbstractMultigraph, t) = add_edge!(mg, MultipleEdge(t))
rem_edge!(mg::AbstractMultigraph, t) = rem_edge!(mg, MultipleEdge(t))
has_edge(mg::AbstractMultigraph, x, y) = has_edge(mg, MultipleEdge(x, y))
add_edge!(mg::AbstractMultigraph, x, y) = add_edge!(mg, MultipleEdge(x, y))
rem_edge!(mg::AbstractMultigraph, x, y) = rem_edge!(mg, MultipleEdge(x, y))
"""
has_edge(mg::AbstractMultigraph, s, d, mul)
Return `true` if `mg` has a multiple edge from `s` to `d` whose multiplicity
is not less than `mul`.
## Examples
```julia
julia> using LightGraphs, Multigraphs
julia> mg = Multigraph(3);
julia> add_edge!(mg, 1, 2, 2);
julia> has_edge(mg, 1, 2, 3)
false
julia> has_edge(mg, 1, 2, 2)
true
```
"""
has_edge(mg::AbstractMultigraph, x, y, z) = has_edge(mg, MultipleEdge(x, y, z))
"""
add_edge!(mg::AbstractMultigraph, s, d, mul)
Add a multiple edge from `s` to `d` multiplicity `mul`. If there is a multiple
edge from `s` to `d`, it will increase its multiplicity by `mul`.
Return `true` multiple edge was added successfully, otherwise return `false`.
## Examples
```julia
julia> using LightGraphs, Multigraphs
julia> mg = Multigraph(3);
julia> e = MultipleEdge(1, 2, 1);
julia> add_edge!(mg, e);
julia> ne(mg, true)
1
julia> add_edge!(mg, e);
julia> ne(mg, true)
2
```
"""
add_edge!(mg::AbstractMultigraph, x, y, z) = add_edge!(mg, MultipleEdge(x, y, z))
"""
rem_edge!(mg::AbstractMultigraph, s, d, mul)
Remove the multiplicity of edge from `s` to `d` by `mul` in `mg`, if `mg` has such
a multiple edge.
## Examples
```julia
julia> using LightGraphs, Multigraphs
julia> mg = Multigraph(3);
julia> add_edge!(mg, 1, 2, 2);
julia> rem_edge!(mg, 1, 2, 3)
false
julia> rem_edge!(mg, 1, 2, 2)
true
```
"""
rem_edge!(mg::AbstractMultigraph, x, y, z) = rem_edge!(mg, MultipleEdge(x, y, z))
has_vertex(mg::AbstractMultigraph, v::Integer) = v in vertices(mg)
rem_vertex!(mg::AbstractMultigraph{T}, v::T) where {T<:Integer} = rem_vertices!(mg, [v])
add_vertex!(mg::AbstractMultigraph{T}) where {T<:Integer} = add_vertices!(mg, one(T))
function outneighbors(mg::AbstractMultigraph, v) end
function inneighbors(mg::AbstractMultigraph, v) end
"""
edges(mg::AbstractMultigraph)
Return a `MultipleEdgeIter` for `mg`.
## Examples
```julia
julia> using LightGraphs, Multigraphs
julia> mg = Multigraph(path_graph(4));
julia> add_edge!(mg, 1, 3, 2);
julia> collect(edges(mg))
4-element Array{Any,1}:
Multiple edge 1 => 2 with multiplicity 1
Multiple edge 1 => 3 with multiplicity 2
Multiple edge 2 => 3 with multiplicity 1
Multiple edge 3 => 4 with multiplicity 1
```
"""
edges(mg::AbstractMultigraph) = MultipleEdgeIter(mg)
"""
mul(mg::AbstractMultigraph, src, dst)
Return the multiplicity of the edge from `src` to `dst`.
"""
| {"hexsha": "46b9331aabf55e6175bd2e75fef308eccda818e7", "size": 4116, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/abstract_multigraph.jl", "max_stars_repo_name": "QuantumBFS/Multigraphs.jl", "max_stars_repo_head_hexsha": "8d1c07598f5ce2083c18c358e5cded601b634a0a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23, "max_stars_repo_stars_event_min_datetime": "2020-03-10T07:46:22.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T04:30:09.000Z", "max_issues_repo_path": "src/abstract_multigraph.jl", "max_issues_repo_name": "QuantumBFS/Multigraphs.jl", "max_issues_repo_head_hexsha": "8d1c07598f5ce2083c18c358e5cded601b634a0a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2020-03-10T03:08:18.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-02T00:26:23.000Z", "max_forks_repo_path": "src/abstract_multigraph.jl", "max_forks_repo_name": "QuantumBFS/Multigraphs.jl", "max_forks_repo_head_hexsha": "8d1c07598f5ce2083c18c358e5cded601b634a0a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-24T11:05:13.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-16T04:32:49.000Z", "avg_line_length": 25.0975609756, "max_line_length": 90, "alphanum_fraction": 0.7006802721, "num_tokens": 1316} |
from sys import argv
import numpy as np
# generate two RxC matrices and their multiplication
# $ python c_array_gen.py 16 16 > data.txt
# matrix dimensions: taken from the command line if given, otherwise 8x8
RS = int(argv[1]) if len(argv) > 2 else 8
CS = int(argv[2]) if len(argv) > 2 else 8
fm_np = np.random.randint(-128,127, size=(RS, CS))
sm_np = np.random.randint(-128,127, size=(RS, CS))
verify_np = np.zeros((RS,CS))
def create_c_array(RS,CS,rnd_matrix,nm):
print(f'static const signed char DATA_{nm}[{RS}][{CS}] = ',end='')
for row in range(0,RS):
if(row == 0):
print('{ {',end='')
else:
print(' {',end='')
for column in range(0,CS-1):
r_int = rnd_matrix[row,column]
print("{0:>6}".format(r_int) + ',',end='')
r_int = rnd_matrix[row,column+1]
print("{0:>6}".format(r_int) ,end='}')
if(row == RS-1):
print("};")
else:
print(',')
create_c_array(RS,CS,fm_np,0)
print('',end='\n')
create_c_array(RS,CS,sm_np,1)
print('',end='\n')
verify_np = np.matmul(fm_np, sm_np)
create_c_array(RS,CS,verify_np,2)
| {"hexsha": "b407e4d05b2068dd6fcd9e6bbb3ab221e87d5366", "size": 1107, "ext": "py", "lang": "Python", "max_stars_repo_path": "verilog/dv/subservient_test/sw/py_tools/gen_c_array_2d.py", "max_stars_repo_name": "yongatek/caravel_yonga-serv-accelerator", "max_stars_repo_head_hexsha": "8efa2eeaadaeba3c78de0546ee40c83433a56322", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-03-06T16:44:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-06T16:44:56.000Z", "max_issues_repo_path": "verilog/dv/subservient_test/sw/py_tools/gen_c_array_2d.py", "max_issues_repo_name": "yongatek/caravel_yonga-serv-accelerator", "max_issues_repo_head_hexsha": "8efa2eeaadaeba3c78de0546ee40c83433a56322", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "verilog/dv/subservient_test/sw/py_tools/gen_c_array_2d.py", "max_forks_repo_name": "yongatek/caravel_yonga-serv-accelerator", "max_forks_repo_head_hexsha": "8efa2eeaadaeba3c78de0546ee40c83433a56322", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.75, "max_line_length": 71, "alphanum_fraction": 0.5672990063, "include": true, "reason": "import numpy", "num_tokens": 317} |
import numpy as np
from numpy.linalg import inv
'''
Kalman smoothing of trajectories using INS transforms combined with
ICP scan matching
'''
def kf_pass(A0, B0, C1, d1, Q0, R1, mu00, Sigma00, u0, z1):
# Kalman filter forward pass
# Follows convention:
# x1 = A0*x0 + B0*u0 + eta, eta ~ N(0, Q0)
# z1 = C1*x1 + d1 + delta, delta ~ N(0, R1)
# Dynamics update
mu10 = np.dot(A0, mu00)
if B0 is not None and u0 is not None:
mu10 = mu10 + np.dot(B0, u0)
Sigma10 = np.dot(np.dot(A0, Sigma00), A0.T) + Q0
# Measurement update
if z1 is not None:
K = np.dot(np.dot(Sigma10, C1.T), inv(np.dot(np.dot(C1, Sigma10), C1.T) + R1))
mu11 = mu10 + np.dot(K, (z1 - np.dot(C1, mu10) + d1))
Sigma11 = np.dot(np.eye(A0.shape[0]) - np.dot(K, C1), Sigma10)
else:
mu11 = mu10
Sigma11 = Sigma10
# Return mu10 and Sigma10 as well for smoothing
return (mu10, Sigma10, mu11, Sigma11)
def ks_pass(A, mu00, mu10, mu1T, Sigma00, Sigma10, Sigma1T):
# Kalman smoother backward pass
L = np.dot(np.dot(Sigma00, A.T), inv(Sigma10))
mu0T = mu00 + np.dot(L, (mu1T - mu10))
Sigma0T = Sigma00 + np.dot(np.dot(L, Sigma1T - Sigma10), L.T)
return (mu0T, Sigma0T)
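# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; not part of the original
# pipeline). It exercises kf_pass/ks_pass on an assumed 1-D constant-velocity
# toy model with made-up noise levels, following the conventions documented
# in kf_pass above.
def _toy_kf_ks_demo(T=5):
    A = np.array([[1.0, 1.0], [0.0, 1.0]])    # state = [position, velocity]
    C = np.array([[1.0, 0.0]])                # observe position only
    d = np.zeros(1)
    Q = 0.01 * np.eye(2)                      # process noise (assumed)
    R = 0.25 * np.eye(1)                      # measurement noise (assumed)
    mus, Sigmas = [np.zeros(2)], [np.eye(2)]
    dmus, dSigmas = [], []
    # forward (filtering) pass on synthetic position measurements 1, 2, ...
    for t in range(1, T):
        z = np.array([float(t)])
        dm, dSig, m, Sig = kf_pass(A, None, C, d, Q, R, mus[-1], Sigmas[-1], None, z)
        dmus.append(dm)
        dSigmas.append(dSig)
        mus.append(m)
        Sigmas.append(Sig)
    # backward (smoothing) pass, prepending results as in the main block below
    Tmus, TSigmas = [mus[-1]], [Sigmas[-1]]
    for t in range(T - 2, -1, -1):
        Tm, TSig = ks_pass(A, mus[t], dmus[t], Tmus[0], Sigmas[t], dSigmas[t], TSigmas[0])
        Tmus.insert(0, Tm)
        TSigmas.insert(0, TSig)
    return Tmus, TSigmas
# ---------------------------------------------------------------------------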
def plot_kfs_states(mus, Tmus, Sigmas, TSigmas, imu_states, coord=0):
# Plots to compare GPS positions to positions after filtering and smoothing
import matplotlib.pyplot as plt
    cn = ['x', 'y', 'z'][coord]
    nt = len(mus)  # number of time steps; avoid relying on the global nt
ys1 = np.array([mu[coord] for mu in mus])
ys2 = np.array([mu[coord] for mu in Tmus])
ys3 = np.array(imu_states[:, coord])
sigmas = np.array([Sigma[coord, coord] for Sigma in Sigmas])
Tsigmas = np.array([Sigma[coord, coord] for Sigma in TSigmas])
plt.figure()
# Original GPS positions
plt.plot(range(nt), ys3, 'r-', label='$%s\mathrm{\ GPS}$' % cn)
# Kalman filter results
plt.plot(range(nt), ys1, 'b-', label='$%s\mathrm{\ filtered}$' % cn)
# Plot variance interval as well -- should be periodic
plt.plot(range(nt), ys1 + sigmas, 'b:',
label='$%s\mathrm{\ filtered\ } \pm\ \Sigma_{%s%s}$' % (cn, cn, cn))
plt.plot(range(nt), ys1 - sigmas, 'b:')
# Kalman smoother results
plt.plot(range(nt), ys2, 'g-', label='$%s\mathrm{\ smoothed}$' % cn)
plt.plot(range(nt), ys2 + Tsigmas, 'g:',
label='$%s\mathrm{\ smoothed\ } \pm\ \Sigma_{%s%s}$' % (cn, cn, cn))
plt.plot(range(nt), ys2 - Tsigmas, 'g:')
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles, labels, prop={'size': 20}, loc=0)
plt.show()
def plot_kfs_deltas(mus, Tmus, Sigmas, TSigmas, imu_states, coord=0):
# Plots to compare GPS positions to positions after filtering and smoothing
import matplotlib.pyplot as plt
cn = ['x', 'y', 'z'][coord]
T = len(mus)
ys1 = np.array([mu[coord] for mu in mus])
ys2 = np.array([mu[coord] for mu in Tmus])
ys3 = np.array(imu_states[:, coord])
dys1 = ys1[1:] - ys1[:-1]
dys2 = ys2[1:] - ys2[:-1]
dys3 = ys3[1:] - ys3[:-1]
plt.figure()
# Original GPS position deltas
plt.plot(range(1, T), dys3, 'r-', label='$\Delta %s\mathrm{\ GPS}$' % cn)
# Kalman filter deltas
plt.plot(range(1, T), dys1, 'b-', label='$\Delta %s\mathrm{\ filtered}$' % cn)
# Kalman smoother deltas
plt.plot(range(1, T), dys2, 'g-', label='$\Delta %s\mathrm{\ smoothed}$' % cn)
# Difference between smoother and filter
#plt.plot(range(1, T), dys2 - dys1, 'm-', label='$\Delta %s\mathrm{\ smoothed} - \Delta %s\mathrm{\ filtered}$' % (cn, cn))
handles, labels = plt.gca().get_legend_handles_labels()
plt.legend(handles, labels, prop={'size': 20}, loc=0)
plt.show()
if __name__ == '__main__':
import os
import sys
import h5py
from ArgParser import parse_args
from GPSReader import GPSReader
from GPSTransforms import IMUTransforms
from pipeline_config import ICP_TRANSFORMS_DIR, EXPORT_START, EXPORT_NUM,\
EXPORT_STEP
args = parse_args(sys.argv[1], sys.argv[2])
# Load INS data
gps_reader = GPSReader(args['gps'])
GPSData = gps_reader.getNumericData()
imu_transforms = IMUTransforms(GPSData)
if '--full' in sys.argv:
T_start = 0
T_end = GPSData.shape[0] - GPSData.shape[0] % EXPORT_STEP
else:
T_start = EXPORT_START
T_end = T_start + EXPORT_NUM * EXPORT_STEP
nt = T_end - T_start
imu_states = imu_transforms[T_start:T_end, 0:3, 3]
INS_VAR = 1.0 # PARAM
SCAN_VAR = 0.1 # PARAM
VEL_VAR = 0.5 # PARAM
# Load ICP transform corrections
icp_transforms = list()
    for t in range(1, nt // EXPORT_STEP):
        h5_file = os.path.join(ICP_TRANSFORMS_DIR, '%d.h5' % t)
        print(h5_file)
h5f = h5py.File(h5_file, 'r')
transform = h5f['transform'][...]
h5f.close()
icp_transforms.append(transform)
# Set up variables for Kalman smoothing
# Same across all time steps
A = np.eye(6)
A[0, 3] = A[1, 4] = A[2, 5] = 1.0
B = None
C = np.concatenate((np.eye(3), np.eye(3)), axis=0)
C = np.concatenate((C, np.zeros((6, 3))), axis=1)
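    # Shape note (added for clarity): A is the 6x6 constant-velocity transition
    # [[I3, I3], [0, I3]] acting on the state [x, y, z, vx, vy, vz]; C stacks
    # two copies of [I3, 0], so both measurement blocks (raw INS position and
    # ICP-corrected position) observe position only, and their relative trust
    # is set through the R matrices built below.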
def has_obs(t):
#return t % EXPORT_STEP == EXPORT_STEP - 1 and t < nt - 1
return t % EXPORT_STEP == 0
# Dependent on t
us = list()
ds = list()
Qs = list()
Qs.append(np.diag([0.01, 0.3, 0.3, VEL_VAR, VEL_VAR, VEL_VAR])) # PARAM
Rs = list()
for t in range(1, nt):
us.append(None)
ds.append(np.zeros(6))
delta_state = np.abs(imu_states[t, :] - imu_states[t - 1, :])
Qs.append(np.diag(np.concatenate((10 * delta_state, delta_state))))
if has_obs(t):
# Got an ICP scan
Rs.append(np.diag([INS_VAR, INS_VAR, INS_VAR, SCAN_VAR, SCAN_VAR, SCAN_VAR]))
else:
# Didn't get an ICP scan
Rs.append(np.diag([INS_VAR, INS_VAR, INS_VAR, np.inf, np.inf, np.inf]))
# Run Kalman filter
mus = list() # mu_{t|t}
Sigmas = list() # Sigma_{t|t}
dmus = list() # mu_{t+1|t}
dSigmas = list() # Sigma_{t+1|t}
mu00 = np.concatenate((imu_states[0, :], imu_states[1, :] - imu_states[0, :]))
Sigma00 = Qs[0]
mus.append(mu00)
Sigmas.append(Sigma00)
for t in range(1, nt):
z = None
d = ds[t - 1]
u = us[t - 1]
if has_obs(t):
# Get an ICP observation
            z = np.concatenate((imu_states[t, :], imu_states[t, :] + icp_transforms[(t // EXPORT_STEP) - 1][0:3, 3]))
else:
z = np.concatenate((imu_states[t, :], imu_states[t, :]))
dm, dSig, m, Sig = kf_pass(A, B, C, d, Qs[t], Rs[t-1], mus[-1], Sigmas[-1], u, z)
mus.append(m)
Sigmas.append(Sig)
dmus.append(dm)
dSigmas.append(dSig)
# Run Kalman smoothing
Tmus = list() # mu_{t|T}
TSigmas = list() # Sigma_{t|T}
Tmus.append(mus[-1])
TSigmas.append(Sigmas[-1])
for t in range(nt - 2, -1, -1):
Tm, TSig = ks_pass(A, mus[t], dmus[t], Tmus[0], Sigmas[t], dSigmas[t], TSigmas[0])
# Prepend
Tmus.insert(0, Tm)
TSigmas.insert(0, TSig)
# Plot stuff
plot_kfs_states(mus, Tmus, Sigmas, TSigmas, imu_states, coord=2)
#plot_kfs_deltas(mus, Tmus, Sigmas, TSigmas, imu_states, coord=2)
# Export smoothed transforms to file
imu_transforms_smoothed = imu_transforms
for t in range(T_start, T_end):
imu_transforms_smoothed[t, 0:3, 3] = Tmus[t - T_start][0:3]
# FIXME Pass in output file
np.savez('imu_transforms_smoothed.npz', data=imu_transforms_smoothed)
| {"hexsha": "ee0a6a214bad30ab7cc30d900afe2f30dda2537f", "size": 7642, "ext": "py", "lang": "Python", "max_stars_repo_path": "process/KalmanSmoother.py", "max_stars_repo_name": "sameeptandon/sail-car-log", "max_stars_repo_head_hexsha": "0ee3d598bb09d389bcbd2ebf73cd4b2411e796be", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-02-24T03:11:13.000Z", "max_stars_repo_stars_event_max_datetime": "2021-02-24T03:11:13.000Z", "max_issues_repo_path": "process/KalmanSmoother.py", "max_issues_repo_name": "sameeptandon/sail-car-log", "max_issues_repo_head_hexsha": "0ee3d598bb09d389bcbd2ebf73cd4b2411e796be", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "process/KalmanSmoother.py", "max_forks_repo_name": "sameeptandon/sail-car-log", "max_forks_repo_head_hexsha": "0ee3d598bb09d389bcbd2ebf73cd4b2411e796be", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2015-03-18T14:36:04.000Z", "max_forks_repo_forks_event_max_datetime": "2018-07-04T02:57:24.000Z", "avg_line_length": 31.9748953975, "max_line_length": 127, "alphanum_fraction": 0.5902904999, "include": true, "reason": "import numpy,from numpy", "num_tokens": 2487} |
#!/usr/bin/python3
import cv2
import imageio
import logging
import numpy as np
import os
from pathlib import Path
import pdb
import time
import torch
import torch.nn as nn
import torch.nn.functional as F  # needed for F.interpolate in net_process
import torch.utils.data
import torch.backends.cudnn as cudnn
from typing import List, Tuple
import tqdm
import mmcv
from mseg.utils.dir_utils import check_mkdir, create_leading_fpath_dirs
from mseg.utils.names_utils import get_universal_class_names
from mseg.utils.mask_utils_detectron2 import Visualizer
from mseg.utils.resize_util import resize_img_by_short_side
from mseg.taxonomy.taxonomy_converter import TaxonomyConverter
from mseg.taxonomy.naive_taxonomy_converter import NaiveTaxonomyConverter
from mseg_semantic.model.pspnet import PSPNet
from mseg_semantic.utils.avg_meter import AverageMeter
from mseg_semantic.utils.normalization_utils import (
get_imagenet_mean_std,
normalize_img
)
from mseg_semantic.utils.cv2_video_utils import VideoWriter, VideoReader
from mseg_semantic.utils import dataset, transform, config
from mseg_semantic.utils.img_path_utils import dump_relpath_txt
"""
Given a specified task, run inference on it using a pre-trained network.
Used for demos, and for testing on an evaluation dataset.
If projecting universal taxonomy into a different evaluation taxonomy,
the argmax comes *after* the linear mapping, so that probabilities can be
summed first.
Note: "base size" should be the length of the shorter side of the desired
inference image resolution. Note that the official PSPNet repo
(https://github.com/hszhao/semseg/blob/master/tool/test.py) treats
base_size as the longer side, which we found less intuitive given
screen resolution is generally described by shorter side length.
"base_size" is a very important parameter and will
affect results significantly.
"""
_ROOT = Path(__file__).resolve().parent.parent.parent
def get_logger():
"""
"""
logger_name = "main-logger"
logger = logging.getLogger(logger_name)
logger.setLevel(logging.INFO)
handler = logging.StreamHandler()
fmt = "[%(asctime)s %(levelname)s %(filename)s line %(lineno)d %(process)d] %(message)s"
handler.setFormatter(logging.Formatter(fmt))
logger.addHandler(handler)
return logger
logger = get_logger()
def get_unique_stem_from_last_k_strs(fpath: str, k: int = 4) -> str:
"""
Args:
    - fpath: file path
    - k: number of trailing path components (parent dirs + file stem) to keep
Returns:
- unique_stem: string
"""
parts = Path(fpath).parts
    unique_stem = '_'.join(parts[-k:-1]) + '_' + Path(fpath).stem
return unique_stem
class ToFlatLabel(object):
def __init__(self, tc_init, dataset):
self.dataset = dataset
self.tc = tc_init
def __call__(self, image, label):
return image, self.tc.transform_label(label, self.dataset)
def resize_by_scaled_short_side(
image: np.ndarray,
base_size: int,
scale: float
) -> np.ndarray:
"""
Args:
- image: Numpy array of shape ()
- scale:
Returns:
- image_scale:
"""
h, w, _ = image.shape
short_size = round(scale * base_size)
new_h = short_size
new_w = short_size
# Preserve the aspect ratio
if h > w:
new_h = round(short_size/float(w)*h)
else:
new_w = round(short_size/float(h)*w)
image_scale = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_LINEAR)
return image_scale
def pad_to_crop_sz(
image: np.ndarray,
crop_h: int,
crop_w: int,
mean: Tuple[float,float,float]
) -> Tuple[np.ndarray,int,int]:
"""
Network input should be at least crop size, so we pad using mean values if
provided image is too small. No rescaling is performed here.
We use cv2.copyMakeBorder to copy the source image into the middle of a
destination image. The areas to the left, to the right, above and below the
copied source image will be filled with extrapolated pixels, in this case the
provided mean pixel intensity.
Args:
- image:
- crop_h: integer representing crop height
- crop_w: integer representing crop width
Returns:
- image: Numpy array of shape (crop_h x crop_w) representing a
square image, with short side of square is at least crop size.
- pad_h_half: half the number of pixels used as padding along height dim
- pad_w_half" half the number of pixels used as padding along width dim
"""
ori_h, ori_w, _ = image.shape
pad_h = max(crop_h - ori_h, 0)
pad_w = max(crop_w - ori_w, 0)
pad_h_half = int(pad_h / 2)
pad_w_half = int(pad_w / 2)
if pad_h > 0 or pad_w > 0:
image = cv2.copyMakeBorder(
src=image,
top=pad_h_half,
bottom=pad_h - pad_h_half,
left=pad_w_half,
right=pad_w - pad_w_half,
borderType=cv2.BORDER_CONSTANT,
value=mean
)
return image, pad_h_half, pad_w_half
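# Hedged worked example (illustrative numbers): padding a 300x400 image up to
# a 473x473 crop gives pad_h = 173 and pad_w = 73, so pad_h_half = 86 and
# pad_w_half = 36 (the odd leftover row/column lands on the bottom/right);
# scale_process_cuda later uses these offsets to cut the prediction back to
# the original extent.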
def imread_rgb(img_fpath: str) -> np.ndarray:
"""
Returns:
- RGB 3 channel nd-array with shape H * W * 3
"""
bgr_img = cv2.imread(img_fpath, cv2.IMREAD_COLOR)
rgb_img = cv2.cvtColor(bgr_img, cv2.COLOR_BGR2RGB)
rgb_img = np.float32(rgb_img)
return rgb_img
class InferenceTask:
def __init__(self,
args,
base_size: int,
crop_h: int,
crop_w: int,
input_file: str,
output_taxonomy: str,
scales: List[float],
use_gpu: bool = True
):
"""
We always use the ImageNet mean and standard deviation for normalization.
mean: 3-tuple of floats, representing pixel mean value
std: 3-tuple of floats, representing pixel standard deviation
'args' should contain at least two fields (shown below).
Args:
- args:
- base_size:
- crop_h: integer representing crop height, e.g. 473
- crop_w: integer representing crop width, e.g. 473
- input_file: could be absolute path to .txt file, .mp4 file,
or to a directory full of jpg images
- output_taxonomy
- scales
- use_gpu
"""
self.args = args
assert isinstance(self.args.img_name_unique, bool)
assert isinstance(self.args.print_freq, int)
assert isinstance(self.args.num_model_classes, int)
assert isinstance(self.args.model_path, str)
self.pred_dim = self.args.num_model_classes
self.base_size = base_size
self.crop_h = crop_h
self.crop_w = crop_w
self.input_file = input_file
self.output_taxonomy = output_taxonomy
self.scales = scales
self.use_gpu = use_gpu
self.mean, self.std = get_imagenet_mean_std()
self.model = self.load_model(args)
self.softmax = nn.Softmax(dim=1)
self.gray_folder = None # optional, intended for dataloader use
self.data_list = None # optional, intended for dataloader use
if self.output_taxonomy != 'universal':
assert isinstance(self.args.dataset, str)
self.dataset_name = args.dataset
self.tc = TaxonomyConverter()
if self.args.arch == 'psp':
assert isinstance(self.args.zoom_factor, int)
assert isinstance(self.args.network_name, int)
self.id_to_class_name_map = {
i: classname for i, classname in enumerate(get_universal_class_names())
}
# indicate which scales were used to make predictions
# (multi-scale vs. single-scale)
self.scales_str = 'ms' if len(args.scales) > 1 else 'ss'
def load_model(self, args):
"""
Load Pytorch pre-trained model from disk of type
torch.nn.DataParallel. Note that
`args.num_model_classes` will be size of logits output.
Args:
- args:
Returns:
- model
"""
if args.arch == 'psp':
model = PSPNet(
layers=args.layers,
classes=args.num_model_classes,
zoom_factor=args.zoom_factor,
pretrained=False,
network_name=args.network_name
)
elif args.arch == 'hrnet':
from mseg_semantic.model.seg_hrnet import get_configured_hrnet
# note apex batchnorm is hardcoded
model = get_configured_hrnet(args.num_model_classes, load_imagenet_model=False)
elif args.arch == 'hrnet_ocr':
from mseg_semantic.model.seg_hrnet_ocr import get_configured_hrnet_ocr
model = get_configured_hrnet_ocr(args.num_model_classes)
# logger.info(model)
model = torch.nn.DataParallel(model)
if self.use_gpu:
model = model.cuda()
cudnn.benchmark = True
if os.path.isfile(args.model_path):
logger.info(f"=> loading checkpoint '{args.model_path}'")
if self.use_gpu:
checkpoint = torch.load(args.model_path)
else:
checkpoint = torch.load(args.model_path, map_location='cpu')
model.load_state_dict(checkpoint['state_dict'], strict=False)
logger.info(f"=> loaded checkpoint '{args.model_path}'")
else:
raise RuntimeError(f"=> no checkpoint found at '{args.model_path}'")
return model
def execute(self) -> None:
"""
Execute the demo, i.e. feed all of the desired input through the
network and obtain predictions. Gracefully handles .txt,
or video file (.mp4, etc), or directory input.
"""
logger.info('>>>>>>>>>>>>>>>> Start inference task >>>>>>>>>>>>>>>>')
self.model.eval()
suffix = self.input_file[-4:]
is_dir = os.path.isdir(self.input_file)
is_img = suffix in ['.png', '.jpg']
is_vid = suffix in ['.mp4', '.avi', '.mov']
if is_img:
self.render_single_img_pred()
elif is_dir:
# argument is a path to a directory
self.create_path_lists_from_dir()
test_loader = self.create_test_loader()
self.execute_on_dataloader(test_loader)
elif is_vid:
# argument is a video
self.execute_on_video()
elif not is_dir and not is_img and self.args.dataset != 'default':
# evaluate on a train or test dataset
test_loader = self.create_test_loader()
self.execute_on_dataloader(test_loader)
else:
logger.info('Error: Unknown input type')
logger.info('<<<<<<<<<<<<<<<<< Inference task completed <<<<<<<<<<<<<<<<<')
def render_single_img_pred(self, min_resolution: int = 1080):
"""
Since overlaid class text is difficult to read below 1080p, we upsample
predictions.
"""
in_fname_stem = Path(self.input_file).stem
output_gray_fpath = f'{in_fname_stem}_gray.jpg'
output_demo_fpath = f'{in_fname_stem}_overlaid_classes.jpg'
logger.info(f'Write image prediction to {output_demo_fpath}')
rgb_img = imread_rgb(self.input_file)
pred_label_img = self.execute_on_img(rgb_img)
# avoid blurry images by upsampling RGB before overlaying text
if np.amin(rgb_img.shape[:2]) < min_resolution:
rgb_img = resize_img_by_short_side(rgb_img, min_resolution, 'rgb')
pred_label_img = resize_img_by_short_side(pred_label_img, min_resolution, 'label')
metadata = None
frame_visualizer = Visualizer(rgb_img, metadata)
overlaid_img = frame_visualizer.overlay_instances(
label_map=pred_label_img,
id_to_class_name_map=self.id_to_class_name_map
)
imageio.imwrite(output_demo_fpath, overlaid_img)
imageio.imwrite(output_gray_fpath, pred_label_img)
def create_path_lists_from_dir(self) -> None:
"""
Populate a .txt file with relative paths that will be used to create
a Pytorch dataloader.
Args:
- None
Returns:
- None
"""
self.args.data_root = self.input_file
txt_output_dir = str(Path(f'{_ROOT}/temp_files').resolve())
txt_save_fpath = dump_relpath_txt(self.input_file, txt_output_dir)
self.args.test_list = txt_save_fpath
def create_test_loader(self):
"""
Create a Pytorch dataloader from a dataroot and list of
relative paths.
"""
test_transform = transform.Compose([transform.ToTensor()])
test_data = dataset.SemData(
split=self.args.split,
data_root=self.args.data_root,
data_list=self.args.test_list,
transform=test_transform
)
index_start = self.args.index_start
if self.args.index_step == 0:
index_end = len(test_data.data_list)
else:
            index_end = min(index_start + self.args.index_step, len(test_data.data_list))
test_data.data_list = test_data.data_list[index_start:index_end]
self.data_list = test_data.data_list
test_loader = torch.utils.data.DataLoader(
test_data,
batch_size=1,
shuffle=False,
num_workers=self.args.workers,
pin_memory=True
)
return test_loader
def execute_on_img_single(self, image: np.ndarray) -> np.ndarray:
"""
Rather than feeding in crops w/ sliding window across the full-res image, we
downsample/upsample the image to a default inference size. This may differ
from the best training size.
For example, if trained on small images, we must shrink down the image in
testing (preserving the aspect ratio), based on the parameter "base_size",
which is the short side of the image.
Args:
- image: Numpy array representing RGB image
Returns:
- gray_img: prediction, representing predicted label map
"""
h, w, _ = image.shape
scale = 1.
image_scale = resize_by_scaled_short_side(image, self.base_size, scale)
prediction = self.scale_process_cuda(image_scale, h, w)
prediction = prediction.argmax(axis=2)
gray_img = np.uint8(prediction)
return gray_img
def execute_on_img(self, image: np.ndarray) -> np.ndarray:
"""
Rather than feeding in crops w/ sliding window across the full-res image, we
downsample/upsample the image to a default inference size. This may differ
from the best training size.
For example, if trained on small images, we must shrink down the image in
testing (preserving the aspect ratio), based on the parameter "base_size",
which is the short side of the image.
Args:
- image: Numpy array representing RGB image
Returns:
- gray_img: prediction, representing predicted label map
"""
h, w, _ = image.shape
prediction = np.zeros((h, w, self.pred_dim), dtype=float)
prediction = torch.Tensor(prediction).cuda()
for scale in self.scales:
image_scale = resize_by_scaled_short_side(image, self.base_size, scale)
prediction = prediction + torch.Tensor(self.scale_process_cuda(image_scale, h, w)).cuda()
prediction /= len(self.scales)
prediction = torch.argmax(prediction, axis=2)
prediction = prediction.data.cpu().numpy()
gray_img = np.uint8(prediction)
return gray_img
def execute_on_video(self, max_num_frames: int = 5000, min_resolution: int = 1080) -> None:
"""
input_file is a path to a video file.
Read frames from an RGB video file, and write overlaid
predictions into a new video file.
Args:
- None
Returns:
- None
"""
in_fname_stem = Path(self.input_file).stem
out_fname = f'{in_fname_stem}_{self.args.model_name}_universal'
out_fname += f'_scales_{self.scales_str}_base_sz_{self.args.base_size}.mp4'
output_video_fpath = f'{_ROOT}/temp_files/{out_fname}'
create_leading_fpath_dirs(output_video_fpath)
logger.info(f'Write video to {output_video_fpath}')
writer = VideoWriter(output_video_fpath)
reader = VideoReader(self.input_file)
for frame_idx in range(reader.num_frames):
logger.info(f'On image {frame_idx}/{reader.num_frames}')
rgb_img = reader.get_frame()
if frame_idx > max_num_frames:
break
pred_label_img = self.execute_on_img(rgb_img)
# avoid blurry images by upsampling RGB before overlaying text
if np.amin(rgb_img.shape[:2]) < min_resolution:
rgb_img = resize_img_by_short_side(rgb_img, min_resolution, 'rgb')
pred_label_img = resize_img_by_short_side(pred_label_img, min_resolution, 'label')
metadata = None
frame_visualizer = Visualizer(rgb_img, metadata)
output_img = frame_visualizer.overlay_instances(
label_map=pred_label_img,
id_to_class_name_map=self.id_to_class_name_map
)
writer.add_frame(output_img)
reader.complete()
writer.complete()
def execute_on_dataloader(self, test_loader: torch.utils.data.dataloader.DataLoader):
"""
Args:
- test_loader:
Returns:
- None
"""
if self.args.save_folder == 'default':
self.args.save_folder = f'{_ROOT}/temp_files/{self.args.model_name}_{self.args.dataset}_universal_{self.scales_str}/{self.args.base_size}'
os.makedirs(self.args.save_folder, exist_ok=True)
gray_folder = os.path.join(self.args.save_folder, 'gray')
self.gray_folder = gray_folder
check_mkdir(self.gray_folder)
data_time = AverageMeter()
batch_time = AverageMeter()
end = time.time()
results = dict() # path: label_map
for i, (input, _) in enumerate(tqdm.tqdm(test_loader)):
data_time.update(time.time() - end)
# convert Pytorch tensor -> Numpy
input = np.squeeze(input.numpy(), axis=0)
image = np.transpose(input, (1, 2, 0))
gray_img = self.execute_on_img_single(image)
batch_time.update(time.time() - end)
end = time.time()
image_name, _ = self.data_list[i]
img_id = image_name[len(self.input_file):]
results[img_id] = gray_img
# todo: update to time remaining.
if 0 and ((i + 1) % self.args.print_freq == 0) or (i + 1 == len(test_loader)):
logger.info('Test: [{}/{}] '
'Data {data_time.val:.3f} ({data_time.avg:.3f}) '
'Batch {batch_time.val:.3f} ({batch_time.avg:.3f}).'.format(i + 1, len(test_loader),
data_time=data_time,
batch_time=batch_time))
mmcv.dump(results, os.path.join(gray_folder, 'label_maps.pkl'))
def scale_process_cuda(self, image: np.ndarray, h: int, w: int, stride_rate: float = 2/3):
""" First, pad the image. If input is (384x512), then we must pad it up to shape
to have shorter side "scaled base_size".
Then we perform the sliding window on this scaled image, and then interpolate
(downsample or upsample) the prediction back to the original one.
At each pixel, we increment a counter for the number of times this pixel
has passed through the sliding window.
Args:
- image: Array, representing image where shortest edge is adjusted to base_size
- h: integer representing raw image height, e.g. for NYU it is 480
- w: integer representing raw image width, e.g. for NYU it is 640
- stride_rate
Returns:
- prediction: predictions with shorter side equal to self.base_size
"""
start1 = time.time()
ori_h, ori_w, _ = image.shape
image, pad_h_half, pad_w_half = pad_to_crop_sz(image, self.crop_h, self.crop_w, self.mean)
new_h, new_w, _ = image.shape
stride_h = int(np.ceil(self.crop_h*stride_rate))
stride_w = int(np.ceil(self.crop_w*stride_rate))
grid_h = int(np.ceil(float(new_h-self.crop_h)/stride_h) + 1)
grid_w = int(np.ceil(float(new_w-self.crop_w)/stride_w) + 1)
prediction_crop = torch.zeros((self.pred_dim, new_h, new_w)).cuda()
count_crop = torch.zeros((new_h, new_w)).cuda()
start = time.time()
for index_h in range(0, grid_h):
for index_w in range(0, grid_w):
s_h = index_h * stride_h
e_h = min(s_h + self.crop_h, new_h)
s_h = e_h - self.crop_h
s_w = index_w * stride_w
e_w = min(s_w + self.crop_w, new_w)
s_w = e_w - self.crop_w
image_crop = image[s_h:e_h, s_w:e_w].copy()
count_crop[s_h:e_h, s_w:e_w] += 1
prediction_crop[:, s_h:e_h, s_w:e_w] += self.net_process(image_crop, flip=False)
start = time.time()
prediction_crop /= count_crop.unsqueeze(0)
# disregard predictions from padded portion of image
prediction_crop = prediction_crop[:, pad_h_half:pad_h_half+ori_h, pad_w_half:pad_w_half+ori_w]
# CHW -> HWC
prediction_crop = prediction_crop.permute(1,2,0)
prediction_crop = prediction_crop.data.cpu().numpy()
prediction = prediction_crop
# upsample or shrink predictions back down to scale=1.0
#prediction = cv2.resize(prediction_crop, (w, h), interpolation=cv2.INTER_LINEAR)
return prediction
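    # Hedged worked example of the sliding window above (numbers are
    # illustrative): with a 473x473 crop and stride_rate 2/3 the stride is
    # ceil(473 * 2/3) = 316; a padded 720x1280 image then needs
    # ceil((720 - 473) / 316) + 1 = 2 rows and ceil((1280 - 473) / 316) + 1 = 4
    # columns of crops, whose outputs are accumulated and divided by the
    # per-pixel visit count before the padding is trimmed off.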
def net_process(self, image: np.ndarray, flip: bool = True):
""" Feed input through the network.
In addition to running a crop through the network, we can flip
the crop horizontally, run both crops through the network, and then
average them appropriately.
Args:
- model:
- image:
- flip: boolean, whether to average with flipped patch output
Returns:
- output:
"""
input = torch.from_numpy(image.transpose((2, 0, 1))).float()
normalize_img(input, self.mean, self.std)
input = input.unsqueeze(0)
if self.use_gpu:
input = input.cuda()
if flip:
# add another example to batch dimension, that is the flipped crop
input = torch.cat([input, input.flip(3)], 0)
with torch.no_grad():
output = self.model(input)
_, _, h_i, w_i = input.shape
_, _, h_o, w_o = output.shape
if (h_o != h_i) or (w_o != w_i):
output = F.interpolate(output, (h_i, w_i), mode='bilinear', align_corners=True)
if self.output_taxonomy == 'universal':
output = self.softmax(output)
elif self.output_taxonomy == 'test_dataset':
output = self.convert_pred_to_label_tax_and_softmax(output)
else:
print('Unrecognized output taxonomy. Quitting....')
quit()
# print(time.time() - start1, image_scale.shape, h, w)
if flip:
# take back out the flipped crop, correct its orientation, and average result
output = (output[0] + output[1].flip(2)) / 2
else:
output = output[0]
# output = output.data.cpu().numpy()
# convert CHW to HWC order
# output = output.transpose(1, 2, 0)
# output = output.permute(1,2,0)
return output
def convert_pred_to_label_tax_and_softmax(self, output):
"""
"""
if not self.args.universal:
output = self.tc.transform_predictions_test(output, self.args.dataset)
else:
output = self.tc.transform_predictions_universal(output, self.args.dataset)
return output
# def convert_label_to_pred_taxonomy(self, target):
# """
# """
# if self.args.universal:
# _, target = ToFlatLabel(self.tc, self.args.dataset)(target, target)
# return target.type(torch.uint8).numpy()
# else:
# return target
if __name__ == '__main__':
pass
| {"hexsha": "604d267d1a187632c0669c8cf99ed95b2f71a14d", "size": 24664, "ext": "py", "lang": "Python", "max_stars_repo_path": "mseg_semantic/tool/inference_task.py", "max_stars_repo_name": "Jokoe66/mseg-semantic", "max_stars_repo_head_hexsha": "d3aedcfe03ae66d07b9e7b0ba05e0d4b715d11ca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mseg_semantic/tool/inference_task.py", "max_issues_repo_name": "Jokoe66/mseg-semantic", "max_issues_repo_head_hexsha": "d3aedcfe03ae66d07b9e7b0ba05e0d4b715d11ca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mseg_semantic/tool/inference_task.py", "max_forks_repo_name": "Jokoe66/mseg-semantic", "max_forks_repo_head_hexsha": "d3aedcfe03ae66d07b9e7b0ba05e0d4b715d11ca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.2173274596, "max_line_length": 150, "alphanum_fraction": 0.6196480701, "include": true, "reason": "import numpy", "num_tokens": 5585} |
""" Licensed under a 3-clause BSD style license - see LICENSE.rst
This script shows how to extract features from raw images.
The use of this script requires a mask file,
which has to be created with the script generate_mask.py
(c) 2020, Michael Mommert ([email protected])
"""
import os
import requests
import numpy as np
import cloudynight
# instantiate AllskyCamera object and define example image repository
# relative to base directory (defined in __init__.py: example_data/)
cam = cloudynight.AllskyCamera('images')
# this will create a directory `workbench/images` in the repository root;
# `images` is named after the raw image directory (could be a night directory)
# read in mask file; has to be created with generate_mask.fits!
cam.read_mask(filename='../workbench/images/mask.fits')
# read in image data
cam.read_data_from_directory(only_new_data=False)
# this will automatically crop the images
# only_new_data=True is necessary to read all data in the directory
# generate subregions
cam.generate_subregions()
# use wrapper to process all images
# `no_upload=True` can be removed if the webapp is setup properly
cam.process_and_upload_data(no_upload=True)
# plot background median values per subregion for all images
for img in cam.imgdata:
sourcedens_overlay = img.create_overlay(overlaytype='bkgmedian')
img.write_image(overlay=sourcedens_overlay, mask=cam.maskdata,
filename=
os.path.join(cloudynight.conf.DIR_ARCHIVE,
'{}_bkgmedian.png'.format(
img.filename[:img.filename.find('.fit')])))
| {"hexsha": "68d5d43ec33f1723573b3b483777e660e26958b2", "size": 1632, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/extract_features.py", "max_stars_repo_name": "mommermi/cloudynight", "max_stars_repo_head_hexsha": "6fcc72e4844075f65610b5dfb96857e87c507cf6", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2020-01-03T20:05:59.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T23:37:50.000Z", "max_issues_repo_path": "scripts/extract_features.py", "max_issues_repo_name": "mommermi/cloudynight", "max_issues_repo_head_hexsha": "6fcc72e4844075f65610b5dfb96857e87c507cf6", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/extract_features.py", "max_forks_repo_name": "mommermi/cloudynight", "max_forks_repo_head_hexsha": "6fcc72e4844075f65610b5dfb96857e87c507cf6", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2020-01-05T04:39:40.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-20T21:47:56.000Z", "avg_line_length": 37.0909090909, "max_line_length": 78, "alphanum_fraction": 0.7395833333, "include": true, "reason": "import numpy", "num_tokens": 358} |
import numpy as np
def running_median(stream):
med = []
for i in range(1, len(stream)+1):
med.append(np.median(stream[:i]))
return med
if __name__ == '__main__':
print(running_median([2, 1, 4, 7, 2, 0, 5]))
# 2 1.5 2 3.0 2 2.0 2
| {"hexsha": "5c936070a9bdd811d6c2f2e3492e69664111546c", "size": 261, "ext": "py", "lang": "Python", "max_stars_repo_path": "day47/Solution.py", "max_stars_repo_name": "silvioedu/DailyInterview", "max_stars_repo_head_hexsha": "976aec8e001344931aed19f20ccffc605fe063fd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "day47/Solution.py", "max_issues_repo_name": "silvioedu/DailyInterview", "max_issues_repo_head_hexsha": "976aec8e001344931aed19f20ccffc605fe063fd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day47/Solution.py", "max_forks_repo_name": "silvioedu/DailyInterview", "max_forks_repo_head_hexsha": "976aec8e001344931aed19f20ccffc605fe063fd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.6428571429, "max_line_length": 48, "alphanum_fraction": 0.5747126437, "include": true, "reason": "import numpy", "num_tokens": 97} |
#!/usr/bin/env python3
import itertools
import time
from keithley2600b import SMU
import click
import zerorpc
import sys
import yaml
import numpy as np
import tempfile
from scipy import stats
from fabric import Connection
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
import msgpack
import msgpack_numpy
V_REF_DAC = 2.5
G_DAC_A = 1.0
G_DAC_V = 2.0
M_DAC = 16
adict = {"voltage_shp_V": 0,
"voltage_shp_raw": 1,
"voltage_ref_V": 2,
"current_shp_A": 3,
"current_shp_raw": 4,
"current_ref_A" : 5,
}
INSTR_EMU = """
---------------------- Characterize Emulation-Frontend -----------------------
- remove targets from target-ports
- Connect SMU channel A Lo to P10-1 (Target-A GND)
- Connect SMU channel A Hi to P10-2 (Target-A Voltage)
- Resistor (~200 Ohm) and Cap (1-10 uF) between
- P11-1 (Target-B GND)
- P11-2 (Target-B Voltage)
"""
def convert_dac_voltage_to_raw(value_V: float) -> int:
return int((value_V * (2 ** M_DAC)) / (G_DAC_V * V_REF_DAC))
def meas_emulator_setpoint(rpc_client, smu_channel, voltage_V, current_A):
voltage_V = min(max(voltage_V, 0.0), 5.0)
current_A = min(max(current_A, 0.0), 0.050)
smu_channel.configure_isource(range=0.050)
smu_channel.set_current(-current_A, vlimit=5.0) # negative current, because smu acts as a drain
smu_channel.set_output(True)
# write both dac-channels of emulator
rpc_client.set_aux_target_voltage_raw(2 ** 20 + convert_dac_voltage_to_raw(voltage_V))
time.sleep(0.2)
rpc_client.sample_emu_cal(3) # seems to solve some readout-errors at start
meas_enc = rpc_client.sample_emu_cal(10)
meas_rec = msgpack.unpackb(meas_enc, object_hook=msgpack_numpy.decode)
adc_current_raw = float(np.mean(meas_rec))
# voltage measurement only for information, drop might appear severe, because 4port-measurement is not active
smu_voltage = smu_channel.measure_voltage(range=5.0, nplc=1.0)
print(f" reference: {current_A} A @ {smu_voltage:.4f} V; shepherd: "
f"mean={adc_current_raw:.2f}, "
f"[{np.min(meas_rec)}, {np.max(meas_rec)}], "
f"stddev={np.std(meas_rec):.2f} "
f"@ {voltage_V} V")
smu_channel.set_output(False)
return meas_rec, smu_voltage, current_A
def measurement_dynamic(values: list, dict_val: str = "shepherd_raw") -> float:
value_min = min([value[dict_val] for value in values])
value_max = max([value[dict_val] for value in values])
return value_max - value_min
@click.group(context_settings=dict(help_option_names=["-h", "--help"], obj={}))
def cli():
pass
@cli.command()
@click.argument("host", type=str)
@click.option("--user", "-u", type=str, default="joe", help="Host Username")
@click.option("--password", "-p", type=str, default=None, help="Host User Password -> only needed when key-credentials are missing")
@click.option("--outfile", "-o", type=click.Path(), help="save file, if no filename is provided the hostname will be used")
@click.option("--smu-ip", type=str, default="192.168.1.108")
@click.option("--all", "all_", is_flag=True)
@click.option("--harvesting", is_flag=True)
@click.option("--emulation", is_flag=True)
def measure(host, user, password, outfile, smu_ip, all_, harvesting, emulation):
if all_:
if harvesting or emulation:
raise click.UsageError("Either provide --all or individual flags")
harvesting = True
emulation = True
if not any([all_, harvesting, emulation]):
harvesting = True
emulation = True
if password is not None:
fabric_args = {"password": password}
else:
fabric_args = {}
rpc_client = zerorpc.Client(timeout=60, heartbeat=20)
measurement_dict = dict()
if harvesting:
raise click.UsageError("Currently not implemented")
with SMU.ethernet_device(smu_ip) as smu, Connection(host, user=user, connect_kwargs=fabric_args) as cnx:
# TODO: enable 4 Port Mode if possible
res = cnx.sudo("systemctl restart shepherd-rpc", hide=True, warn=True)
#time.sleep(4)
rpc_client.connect(f"tcp://{ host }:4242")
if emulation:
click.echo(INSTR_EMU)
usr_conf = click.confirm("Confirm that everything is set up ...")
if usr_conf:
measurement_dict["emulation"] = {
"dac_voltage_a": list(),
"dac_voltage_b": list(),
"adc_current": list(),
"adc_voltage": list(), # not existing currently
}
mode_old = rpc_client.switch_shepherd_mode("emulation_cal")
print(f"Measurement - Emulation - Current - ADC Channel A - Target A")
voltages_V = [0.0, 0.05, 0.5, 1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5]
currents_A = [0e-3, 1e-6, 5e-6, 10e-6, 50e-6, 100e-6, 500e-6,
1e-3, 5e-3, 10e-3, 15e-3, 20e-3, 25e-3,
30e-3, 35e-3, 40e-3, 45e-3]
rpc_client.select_target_for_power_tracking(True) # targetA-Port will get the monitored dac-channel-b
results_a = np.zeros([6, len(voltages_V) * len(currents_A)], dtype=object)
for index, (current, voltage) in enumerate(itertools.product(currents_A, voltages_V)):
cdata, v_meas, c_set = meas_emulator_setpoint(rpc_client, smu.A, voltage, current)
results_a[0][index] = voltage
results_a[1][index] = convert_dac_voltage_to_raw(voltage)
results_a[2][index] = v_meas
results_a[3][index] = current
results_a[4][index] = cdata
results_a[5][index] = c_set
print(f"Measurement - Emulation - Current - ADC Channel A - Target B")
voltages_V = np.linspace(0.0, 4.5, 46)
currents_A = [20e-3]
rpc_client.select_target_for_power_tracking(False) # targetB-Port will get the monitored dac-channel-b
results_b = np.zeros([6, len(voltages_V) * len(currents_A)], dtype=object)
for index, (current, voltage) in enumerate(itertools.product(currents_A, voltages_V)):
cdata, v_meas, c_set = meas_emulator_setpoint(rpc_client, smu.B, voltage, current)
results_b[0][index] = voltage
results_b[1][index] = convert_dac_voltage_to_raw(voltage)
results_b[2][index] = v_meas
results_b[3][index] = current
results_b[4][index] = cdata
results_b[5][index] = c_set
np.savez_compressed("profile_emu_channels.npz", a=results_a, b=results_b)
rpc_client.switch_shepherd_mode(mode_old)
if __name__ == "__main__":
cli()
| {"hexsha": "c97daf98062dc397d4bd4c7f2e83bedbb5ad6fea", "size": 6915, "ext": "py", "lang": "Python", "max_stars_repo_path": "extra/profile_frontend_measure.py", "max_stars_repo_name": "orgua/shepherd", "max_stars_repo_head_hexsha": "347af12740b008a78b7012ae748d9eb2b147274b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "extra/profile_frontend_measure.py", "max_issues_repo_name": "orgua/shepherd", "max_issues_repo_head_hexsha": "347af12740b008a78b7012ae748d9eb2b147274b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-10-14T14:30:55.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-04T15:52:07.000Z", "max_forks_repo_path": "extra/profile_frontend_measure.py", "max_forks_repo_name": "orgua/shepherd", "max_forks_repo_head_hexsha": "347af12740b008a78b7012ae748d9eb2b147274b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-15T09:21:28.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-15T09:21:28.000Z", "avg_line_length": 39.5142857143, "max_line_length": 132, "alphanum_fraction": 0.6245842372, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1858} |
# Copyright 2016-present CERN – European Organization for Nuclear Research
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
import pandas as pd
import numpy as np
from qf_lib.common.utils.data_cleaner import DataCleaner
from qf_lib.containers.dataframe.simple_returns_dataframe import SimpleReturnsDataFrame
from qf_lib.containers.series.simple_returns_series import SimpleReturnsSeries
from qf_lib_tests.helpers.testing_tools.containers_comparison import assert_dataframes_equal
class TestDataCleaner(TestCase):
def setUp(self):
self.test_dataframe = self._create_test_dataframe()
self.test_benchmark = self._create_test_benchmark()
self.data_cleaner = DataCleaner(self.test_dataframe)
@classmethod
def _create_test_dataframe(cls):
values = [[np.nan, 0.0, 0.0, 0.0, 0.0],
[1.0, np.nan, 1.0, 1.0, 1.0],
[2.0, np.nan, np.nan, 2.0, 2.0],
[3.0, 3.0, 3.0, np.nan, 3.0],
[4.0, 4.0, 4.0, 4.0, 4.0],
[5.0, 5.0, 5.0, 5.0, 5.0]]
index = pd.date_range(start='2015-01-01', periods=6)
columns = ['a', 'b', 'c', 'd', 'e']
dataframe = SimpleReturnsDataFrame(data=values, index=index, columns=columns)
return dataframe
@classmethod
def _create_test_benchmark(cls):
values = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
index = pd.date_range(start='2015-01-02', periods=6)
return SimpleReturnsSeries(data=values, index=index, name='Test prices')
def test_proxy_using_values(self):
expected_values = [[0.0, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0],
[2.0, 0.0, 2.0, 2.0],
[3.0, 3.0, 0.0, 3.0],
[4.0, 4.0, 4.0, 4.0],
[5.0, 5.0, 5.0, 5.0]]
expected_columns = ['a', 'c', 'd', 'e']
expected_dates = self.test_dataframe.index.copy()
expected_dataframe = SimpleReturnsDataFrame(data=expected_values, columns=expected_columns, index=expected_dates)
self.data_cleaner.threshold = 0.2
actual_dataframe = self.data_cleaner.proxy_using_value(proxy_value=0.0)
assert_dataframes_equal(expected_dataframe, actual_dataframe)
def test_proxy_using_regression(self):
expected_values = [[np.nan, 0.0, 0.0, 0.0],
[1.0, 1.0, 1.0, 1.0],
[2.0, 2.0, 2.0, 2.0],
[3.0, 3.0, 3.0, 3.0],
[4.0, 4.0, 4.0, 4.0],
[5.0, 5.0, 5.0, 5.0]]
expected_columns = ['a', 'c', 'd', 'e']
expected_dates = self.test_dataframe.index.copy()
expected_dataframe = SimpleReturnsDataFrame(data=expected_values, columns=expected_columns, index=expected_dates)
self.data_cleaner.threshold = 0.2
actual_dataframe = self.data_cleaner.proxy_using_regression(benchmark_tms=self.test_benchmark,
columns_type=SimpleReturnsSeries)
assert_dataframes_equal(expected_dataframe, actual_dataframe)
| {"hexsha": "7c4afee6bcb429d01794e16fab3ddf6dd6f70382", "size": 3734, "ext": "py", "lang": "Python", "max_stars_repo_path": "qf_lib_tests/unit_tests/common/utils/test_data_cleaner.py", "max_stars_repo_name": "webclinic017/qf-lib", "max_stars_repo_head_hexsha": "96463876719bba8a76c8269cef76addf3a2d836d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 198, "max_stars_repo_stars_event_min_datetime": "2019-08-16T15:09:23.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:44:00.000Z", "max_issues_repo_path": "qf_lib_tests/unit_tests/common/utils/test_data_cleaner.py", "max_issues_repo_name": "webclinic017/qf-lib", "max_issues_repo_head_hexsha": "96463876719bba8a76c8269cef76addf3a2d836d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 13, "max_issues_repo_issues_event_min_datetime": "2021-01-07T10:15:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T13:01:47.000Z", "max_forks_repo_path": "qf_lib_tests/unit_tests/common/utils/test_data_cleaner.py", "max_forks_repo_name": "webclinic017/qf-lib", "max_forks_repo_head_hexsha": "96463876719bba8a76c8269cef76addf3a2d836d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 29, "max_forks_repo_forks_event_min_datetime": "2019-08-16T15:21:28.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T09:53:49.000Z", "avg_line_length": 43.9294117647, "max_line_length": 121, "alphanum_fraction": 0.6063202999, "include": true, "reason": "import numpy", "num_tokens": 1031} |
[STATEMENT]
lemma Assigno_undef: "(Assigno x \<theta> = undefg) = (\<theta>=undeft)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (Assigno x \<theta> = undefg) = (\<theta> = undeft)
[PROOF STEP]
by (metis Assigno.elims option.distinct(1)) | {"llama_tokens": 104, "file": "Differential_Game_Logic_USubst", "length": 1} |
import os
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.autograd import Variable
from torch.distributions import Categorical
import matplotlib.pyplot as plt
env = gym.make("CartPole-v0")
env.reset()
class Policy(nn.Module):
def __init__(self):
super(Policy, self).__init__()
self.num_actions = env.action_space.n
self.state_dim = env.observation_space.shape[0]
self.fc1 = nn.Linear(self.state_dim, 256)
self.fc2 = nn.Linear(256, self.num_actions)
def forward(self, x):
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
restore = True
if restore and os.path.isfile("polict.pt"):
policy = torch.load("policy.pt")
else:
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=0.001)
def update_policy(states, actions, rewards, log_probs, gamma=0.99):
"""
Расчет потерь, вычисление градиентов, обратное распространение и обновление параметров нейронной сети.
"""
loss = []
dis_rewards = rewards[:]
for i in range(len(dis_rewards) - 2, -1, -1):
dis_rewards[i] = dis_rewards[i] + gamma * dis_rewards[i + 1]
dis_rewards = torch.tensor(dis_rewards)
for log_prob, reward in zip(log_probs, dis_rewards):
loss.append(-log_prob * reward)
loss = torch.cat(loss).sum()
optimizer.zero_grad()
loss.backward()
optimizer.step()
def get_policy_values(state):
"""
Рассчет ненормализованных значений policy по входному массиву значений
"""
state = Variable(torch.from_numpy(state)).type(torch.FloatTensor).unsqueeze(0)
policy_values = policy(state)
return policy_values
def generate_episode(t_max=1000):
"""
Создание эпизода. Сохранение состояний, действий, наград и регистрация вероятностей. Обновление policy
"""
states, actions, rewards, log_probs = [], [], [], []
s = env.reset()
for t in range(t_max):
action_probs = F.softmax(get_policy_values(s), dim=-1)
sampler = Categorical(action_probs)
a = sampler.sample()
log_prob = sampler.log_prob(a)
new_s, r, done, _ = env.step(a.item())
states.append(s)
actions.append(a)
rewards.append(r)
log_probs.append(log_prob)
s = new_s
if done:
break
update_policy(states, actions, rewards, log_probs)
return sum(rewards)
def play_episodes(num_episodes=10, render=False):
"""
Запускаем игру используя обученную policy
"""
for i in range(num_episodes):
rewards = []
s = env.reset()
for _ in range(1000):
if render:
env.render()
action_probs = F.softmax(get_policy_values(s), dim=-1)
sampler = Categorical(action_probs)
a = sampler.sample()
log_prob = sampler.log_prob(a)
new_s, r, done, _ = env.step(a.item())
rewards.append(r)
s = new_s
if done:
print("Episode {} finished with reward {}".format(i + 1, np.sum(rewards)))
break
def plot_rewards(rewards, running_rewards):
"""
Графики
"""
plt.style.use('seaborn-darkgrid')
fig = plt.figure(figsize=(12, 7))
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
plt.subplots_adjust(hspace=.5)
ax1.set_title('Episodic rewards')
ax1.plot(rewards, label='Episodic rewards')
ax1.set_xlabel("Episodes")
ax1.set_ylabel("Rewards")
ax2.set_title('Running rewards')
ax2.plot(running_rewards, label='Running rewards')
ax2.set_xlabel("Episodes")
ax2.set_ylabel("Average rewards")
fig.savefig("backpropagatw_test.png")
if __name__ == "__main__":
num_episodes = 1500
verbose = True
print_every = 50
target_avg_reward_100ep = 195
running_reward = None
rewards = []
running_rewards = []
restore_model = True
if restore_model and os.path.isfile("polict.pt"):
policy = torch.load("policy.pt")
else:
policy = Policy()
optimizer = optim.Adam(policy.parameters(), lr=0.001)
for i in range(num_episodes):
reward = generate_episode()
rewards.append(reward)
running_reward = np.mean(rewards[-100:])
running_rewards.append(running_reward)
if verbose:
if not i % print_every:
print("Episode: {}. Running reward: {}".format(i + 1, running_reward))
if i >= 99 and running_reward >= target_avg_reward_100ep:
print("Episode: {}. Running reward: {}".format(i + 1, running_reward))
print("Ran {} episodes. Solved after {} episodes.".format(i + 1, i - 100 + 1))
break
elif i == num_episodes - 1:
print("Couldn't solve after {} episodes".format(num_episodes))
plot_rewards(rewards, running_rewards)
torch.save(policy, "cartpole_policy_reinforce.pt")
| {"hexsha": "81dd4f677837244ffa9caac9b8ba53f3b1826ac2", "size": 4994, "ext": "py", "lang": "Python", "max_stars_repo_path": "backpropagate_Cartpole/main.py", "max_stars_repo_name": "Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses", "max_stars_repo_head_hexsha": "21e70c3eb5fac8984adb78771bf25f1e5aef823a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "backpropagate_Cartpole/main.py", "max_issues_repo_name": "Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses", "max_issues_repo_head_hexsha": "21e70c3eb5fac8984adb78771bf25f1e5aef823a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "backpropagate_Cartpole/main.py", "max_forks_repo_name": "Forsenlol/Reinforced-training-simulating-the-work-of-neural-synapses", "max_forks_repo_head_hexsha": "21e70c3eb5fac8984adb78771bf25f1e5aef823a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.5371428571, "max_line_length": 106, "alphanum_fraction": 0.6283540248, "include": true, "reason": "import numpy", "num_tokens": 1258} |
# -*- coding: utf-8 -*-
#
# example exs4a
# ----------------------------------------------------------------
# PURPOSE
# Analysis of a plane truss using loops.
# ----------------------------------------------------------------
# REFERENCES
# P-E Austrell 1994-03-08
# K-G Olsson 1995-09-28
# O Dahlblom 2004-08-31
# J Lindemann 2009-01-25
# ----------------------------------------------------------------
import numpy as np
import calfem.core as cfc
# ----- Topology matrix Edof -------------------------------------
edof = np.array([
[1, 2, 5, 6],
[3, 4, 7, 8],
[5, 6, 9, 10],
[7, 8, 11, 12],
[7, 8, 5, 6],
[11, 12, 9, 10],
[3, 4, 5, 6],
[7, 8, 9, 10],
[1, 2, 7, 8],
[5, 6, 11, 12]
])
# ----- Stiffness matrix K and load vector f ---------------------
K = np.zeros([12, 12])
f = np.zeros([12, 1])
f[10] = 0.5e6*np.sin(np.pi/6)
f[11] = -0.5e6*np.cos(np.pi/6)
# ----- Element properties ---------------------------------------
A = 25.0e-4
E = 2.1e11
ep = [E, A]
# ----- Element coordinates --------------------------------------
ex = np.array([
[0., 2.],
[0., 2.],
[2., 4.],
[2., 4.],
[2., 2.],
[4., 4.],
[0., 2.],
[2., 4.],
[0., 2.],
[2., 4.]
])
ey = np.array([
[2., 2.],
[0., 0.],
[2., 2.],
[0., 0.],
[0., 2.],
[0., 2.],
[0., 2.],
[0., 2.],
[2., 0.],
[2., 0.]
])
# ----- Create element stiffness matrices Ke and assemble into K -
for elx, ely, eltopo in zip(ex, ey, edof):
Ke = cfc.bar2e(elx, ely, ep)
cfc.assem(eltopo, K, Ke)
print("Stiffness matrix K:")
print(K)
# ----- Solve the system of equations ----------------------------
bc = np.array([1, 2, 3, 4])
a, r = cfc.solveq(K, f, bc)
print("Displacements a:")
print(a)
print("Reaction forces r:")
print(r)
# ----- Element forces -------------------------------------------
ed = cfc.extract_ed(edof, a)
N = np.zeros([edof.shape[0]])
print("Element forces:")
i = 0
for elx, ely, eld in zip(ex, ey, ed):
N[i] = cfc.bar2s(elx, ely, ep, eld)
print("N%d = %g" % (i+1, N[i]))
i += 1
| {"hexsha": "a1ef88434b5ccc3090b6123d01b327bb72daa7ec", "size": 2113, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/exs_bar2_la.py", "max_stars_repo_name": "CALFEM/calfem-py", "max_stars_repo_head_hexsha": "26d4082ca6b907c48ad814733c733ae30a959657", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/exs_bar2_la.py", "max_issues_repo_name": "CALFEM/calfem-py", "max_issues_repo_head_hexsha": "26d4082ca6b907c48ad814733c733ae30a959657", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/exs_bar2_la.py", "max_forks_repo_name": "CALFEM/calfem-py", "max_forks_repo_head_hexsha": "26d4082ca6b907c48ad814733c733ae30a959657", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.7476635514, "max_line_length": 66, "alphanum_fraction": 0.3880738287, "include": true, "reason": "import numpy", "num_tokens": 765} |
% Options for packages loaded elsewhere
\PassOptionsToPackage{unicode}{hyperref}
\PassOptionsToPackage{hyphens}{url}
%
\documentclass[
]{book}
\usepackage{lmodern}
\usepackage{amssymb,amsmath}
\usepackage{ifxetex,ifluatex}
\ifnum 0\ifxetex 1\fi\ifluatex 1\fi=0 % if pdftex
\usepackage[T1]{fontenc}
\usepackage[utf8]{inputenc}
\usepackage{textcomp} % provide euro and other symbols
\else % if luatex or xetex
\usepackage{unicode-math}
\defaultfontfeatures{Scale=MatchLowercase}
\defaultfontfeatures[\rmfamily]{Ligatures=TeX,Scale=1}
\fi
% Use upquote if available, for straight quotes in verbatim environments
\IfFileExists{upquote.sty}{\usepackage{upquote}}{}
\IfFileExists{microtype.sty}{% use microtype if available
\usepackage[]{microtype}
\UseMicrotypeSet[protrusion]{basicmath} % disable protrusion for tt fonts
}{}
\makeatletter
\@ifundefined{KOMAClassName}{% if non-KOMA class
\IfFileExists{parskip.sty}{%
\usepackage{parskip}
}{% else
\setlength{\parindent}{0pt}
\setlength{\parskip}{6pt plus 2pt minus 1pt}}
}{% if KOMA class
\KOMAoptions{parskip=half}}
\makeatother
\usepackage{xcolor}
\IfFileExists{xurl.sty}{\usepackage{xurl}}{} % add URL line breaks if available
\IfFileExists{bookmark.sty}{\usepackage{bookmark}}{\usepackage{hyperref}}
\hypersetup{
pdftitle={Recipes by Anna \& Ivan},
pdfauthor={(1+V)Anya},
hidelinks,
pdfcreator={LaTeX via pandoc}}
\urlstyle{same} % disable monospaced font for URLs
\usepackage{longtable,booktabs}
% Correct order of tables after \paragraph or \subparagraph
\usepackage{etoolbox}
\makeatletter
\patchcmd\longtable{\par}{\if@noskipsec\mbox{}\fi\par}{}{}
\makeatother
% Allow footnotes in longtable head/foot
\IfFileExists{footnotehyper.sty}{\usepackage{footnotehyper}}{\usepackage{footnote}}
\makesavenoteenv{longtable}
\usepackage{graphicx}
\makeatletter
\def\maxwidth{\ifdim\Gin@nat@width>\linewidth\linewidth\else\Gin@nat@width\fi}
\def\maxheight{\ifdim\Gin@nat@height>\textheight\textheight\else\Gin@nat@height\fi}
\makeatother
% Scale images if necessary, so that they will not overflow the page
% margins by default, and it is still possible to overwrite the defaults
% using explicit options in \includegraphics[width, height, ...]{}
\setkeys{Gin}{width=\maxwidth,height=\maxheight,keepaspectratio}
% Set default figure placement to htbp
\makeatletter
\def\fps@figure{htbp}
\makeatother
\setlength{\emergencystretch}{3em} % prevent overfull lines
\providecommand{\tightlist}{%
\setlength{\itemsep}{0pt}\setlength{\parskip}{0pt}}
\setcounter{secnumdepth}{5}
\usepackage{booktabs}
\usepackage{amsthm}
\makeatletter
\def\thm@space@setup{%
\thm@preskip=8pt plus 2pt minus 4pt
\thm@postskip=\thm@preskip
}
\makeatother
\usepackage[]{natbib}
\bibliographystyle{apalike}
\title{Recipes by Anna \& Ivan}
\author{(1+V)Anya}
\date{2020-11-14}
\begin{document}
\maketitle
{
\setcounter{tocdepth}{1}
\tableofcontents
}
\hypertarget{baking}{%
\chapter{Baking}\label{baking}}
\hypertarget{macaron-endless-vanilla}{%
\section{Macaron ``Endless Vanilla''}\label{macaron-endless-vanilla}}
For biscuits:
\begin{itemize}
\tightlist
\item
150 g almond flour, sifted twice
\item
150 g powdered sugar
\item
55 g egg whites, aged (from about 1.5 eggs)
\item
1 1/2 vanilla pod
\item
150 g icing sugar
\item
37 g mineral water (without gases)
\item
55 g egg whites, aged
\end{itemize}
For vanilla ganache:
\begin{itemize}
\tightlist
\item
200 g heavy cream
\item
1 vanilla pod from Mexico
\item
1 vanilla pod from Madagascar
\item
1 vanilla pod from Tahiti
\item
220 grams of white chocolate (Valrhona preferred, but not critical)
\end{itemize}
Eggs:
You may have noticed the term ``aged'' egg whites. This means that the day before you make the macarons, the egg whites should be taken out of the refrigerator, covered with cling film and left to stand at room temperature. Such whites become more liquid as the protein structure loosens, and as a result they whip up to a greater volume.
Ganache:
Cut the vanilla pods in half. Use a knife to remove all the seeds. Add them along with the pods to a saucepan and pour the cream.
Bring to a boil over medium heat. Remove from heat, cover and let sit for 30 minutes.
Break the chocolate into pieces and melt in a water bath.
Heat the cream again. Remove the vanilla pods from the saucepan and gradually pour the vanilla cream into the chocolate in a thin stream, stirring constantly with a whisk so that there are no lumps.
Cover with cling film and refrigerate overnight.
Macarons:
Line a baking sheet with parchment paper. Prepare a cooking syringe or a piping bag with a straight round nozzle.
Macarons are made with a diameter of 3-4 cm. To begin with, while your hand is not trained, you can draw stencils for yourself on parchment paper, on the back side. In France, for example, you can buy baking paper with a stencil already marked or a silicone mat.
Sift the almond flour. If large pieces of nuts remain in the sieve, grind them again or set them aside for a biscuit, cake or pie.
Sift almond flour and powdered sugar several times into a bowl.
Remove the seeds from the vanilla pods and add to the almond mixture. Do not stir. Add the first batch of egg whites (55 g) - do not stir.
In a small saucepan, combine the water and the second portion of icing sugar and bring to a boil until the syrup reaches 118C.
Whisk the second 55 g of egg whites until firm.
Gradually pour in the hot syrup in a thin stream. Continue whisking until the mixture is cool and shiny, thick and smooth.
When you lift the whisk, some of the mass should remain on its tip without falling off; the ``beak'' of meringue should not stand straight but droop over. That said, if you turn the bowl of whites upside down, nothing should fall or leak out.
Add the resulting Italian meringue (egg whites with hot syrup) to the first mixture.
Stir gently with a spatula while rotating the bowl counterclockwise with the other hand.
The mass should be homogeneous, soft and pliable. If you lift the spatula, the drops of batter that fall back down should slowly spread rather than hold their shape. Do not be afraid to keep mixing - if the batter is under-mixed, the piped macarons will keep ``tails'' on the surface instead of slowly spreading into an even shape.
Transfer the batter to the syringe or piping bag and pipe even circles onto the lined paper.
And leave them to stand at room temperature for 1 hour. This is a very important stage of cooking - a light film forms on the surface of the meringue, due to which they do not crack during baking and a beautiful ``skirt'' is formed at the bottom.
Heat the oven to 175C.
Bake the cookies for 12-15 minutes. During baking, very quickly open and close the oven twice (at the 8th and 10th minute), being careful not to slam the door.
Take out the finished cookies, grab the edges of the paper and transfer it along with the macarons onto a flat surface. Let cool completely.
A properly baked cookie will easily come off the paper surface.
When the cookies have cooled, fill a syringe or bag with ganache and place a small amount on one half, then close the other.
Refrigerate for 24 hours. Freshly assembled macarons are never eaten right away - the shells would be tough and the cream would run. Give them time to mature, and you will get exactly what macarons are loved for: a crisp crust, a tender center and a melting filling.
\hypertarget{upside-down-cake-with-nuts-and-bananas}{%
\section{Upside down cake with nuts and bananas}\label{upside-down-cake-with-nuts-and-bananas}}
\begin{itemize}
\item
130g butter
\item
120g sugar
\item
120g flour
\item
50g ground roasted nuts
\item
3 eggs
\item
0.5 tsp baking powder
\item
2 bananas
\item
50g dark muscovado sugar
\item
20g butter
\end{itemize}
cake pan 10x20cm
oven 170C
First, brush the sides of the mold with softened butter and sprinkle with flour. Combine dark sugar and 20g butter and place on the bottom of the mold.
Prepare the dough. All foods should be at room temperature. Beat butter with sugar.
Add two eggs, one at a time, whisking each time until smooth at maximum mixer speed.
Add the nuts, add the remaining egg and beat the mixture again with a mixer.
Add flour and baking powder, stir on low speed until smooth.
Cut each banana into three pieces lengthwise. Place the prepared bananas in the mold on top of the sugar mixture.
Place the dough on top and tap the mold well on the table.
Bake at 170C for 50 minutes.
Remove from the oven, cut off the risen top by sliding a knife along the rim of the pan, then flip the hot cake out onto a plate.
\hypertarget{spicy-donutsmuffins-from-the-oven-with-apples}{%
\section{Spicy donuts/muffins from the oven with apples}\label{spicy-donutsmuffins-from-the-oven-with-apples}}
\begin{itemize}
\tightlist
\item
Flour - 140 g
\item
Cinnamon - 1 tsp
\item
Ginger - 1 tsp
\item
Baking powder - 1 tsp.
\item
Sugar - 140 g
\item
Condensed milk - 100 g
\item
Egg - 1 piece
\item
Butter - 25 g
\item
Apple
\end{itemize}
In a bowl, combine the flour (140 g), cinnamon and dried ginger (1 tsp each; I would not use fresh ginger), baking powder (also 1 tsp) and sugar (140 g). Stir this mixture very well and carefully, because the baking powder and spices need to be distributed evenly in the future dough. Of course, you can leave out the cinnamon and ginger, or replace or supplement them with other spices - all at your discretion. Next add the condensed milk (100 g; you can replace it with full-fat sour cream).
Then the melted butter (25 g) and one egg. At the very end, grate in the apple (110-120 g). Traditionally I take Granny Smith, because it can be found anywhere in Russia. Here you can play with the filling: add chopped nuts, oatmeal or dried berries. Mix the finished dough well and transfer it to a piping bag.
This will make it most convenient to use. Place the dough into molds, keeping in mind that it will rise by about one and a half times. Obviously, these are not real donuts (real ones are deep-fried), so you can safely use any small molds - make cupcakes, mini cupcakes and other options.
Bake at 180 degrees (top-bottom) until tender. Check with a skewer.
\hypertarget{carrot-cake}{%
\section{Carrot cake}\label{carrot-cake}}
For the cakes:
\begin{itemize}
\tightlist
\item
  360 ml (1.5 cups) vegetable oil
\item
  200 g (4 pcs) eggs
\item
  300 g (2 cups) flour
\item
  460 g (2 cups) sugar
\item
  8 g (1 sachet) baking powder
\item
  15 g (2 tsp) baking soda
\item
  3 g (1/2 tsp) salt
\item
  12 g (1.5 tsp) cinnamon
\item
  5 g (0.5 tsp) nutmeg
\item
  5 g (0.5 tsp) cloves
\item
  500 g of carrots, grated on a fine grater
\item
  200 g (1 cup) nuts
\end{itemize}
For the cream:
\begin{itemize}
\tightlist
\item
  800 g of curd cheese
\item
  120 g icing sugar
\item
  150 ml cream 33\%
\end{itemize}
First you need to combine vegetable oil with sugar. To do this, pour oil into the mixer bowl and add sugar. Beat at the maximum speed of the mixer for about 7 minutes, until the components are combined.
While the butter and sugar are beating, combine all dry ingredients in a separate bowl: sifted flour, salt, baking powder, soda, cinnamon, cloves, nutmeg. Stir well with a whisk until evenly spread.
When the butter and sugar have combined into a single emulsion, add the eggs one at a time, letting each egg incorporate completely (stir for 4-5 minutes). After all the eggs are added, you can add the dry mixture to the liquid one.
It is better to knead with a silicone spatula, since the mixture is not sticky and lumps are not formed during stirring. The result is a smooth, thick, but flowing dough.
It remains to add chopped nuts and grated carrots. It will be difficult to stir at first, but after a couple of minutes these ingredients will be evenly distributed throughout the mixture.
Line the bottoms of the molds with baking paper and wrap the outsides in foil. Divide the mixture into three equal portions and place in separate baking tins. It is convenient to do this on a scale; for that you need to know the weight of the container in which the ingredients were mixed. Spread the dough evenly with a spatula. Put in an oven preheated to 175 degrees and bake until tender, about 35 minutes.
Remove the finished biscuits from the oven and let them cool. Carefully run a knife around the molds to release the biscuits and remove the parchment from their bottoms. We have got aromatic, juicy, porous biscuits. Wrap each cake separately in cling film and put in the refrigerator for several hours.
After the biscuits have cooled completely, you can start assembling the cake.
For the cream, whip the curd cheese, icing sugar and cream (listed above) together until smooth and thick.
Apply a small amount of cream to the cake base so that the cakes do not slip during assembly. Soak the first cake layer with syrup (we took cherry caramel), although you can skip the soaking, since the biscuits are already juicy and flavourful.
Put some of the cream on the soaked biscuit, spread in an even layer, smooth the edges by going over the side of the cake with a spatula, trying to keep it perpendicular to the substrate.
Put on the next cake layer and repeat the soaking and cream application.
Complete the assembly with the third and last biscuit, remembering to soak it. Apply the remaining cream to the top and sides of the cake, closing all the small gaps.
We have got a delicious, tender, even cake in a ``half-naked'' coating. We leave it for an hour and a half in the refrigerator, so that the cream stiffens a little, and it is convenient to cut the cake. Decorate as desired (in our case, there were gingerbread cookies, some crushed nuts and fresh berries).
\hypertarget{strawberry-cake}{%
\section{Strawberry cake}\label{strawberry-cake}}
For the cake:
\begin{itemize}
\tightlist
\item
  225 g butter
\item
  225 g sugar
\item
  4 eggs
\item
  225 g self-rising flour (or 220 g plain flour + 1 tablespoon baking powder)
\item
  3 tbsp milk
\end{itemize}
For the filling:
\begin{itemize}
\tightlist
\item
  300 ml heavy cream
\item
  300 g strawberries
\item
  100 ml strawberry jam
\end{itemize}
Beat the softened butter with the sugar. Add the eggs one at a time, beating without stopping. The mass must be airy. Gently fold in the flour, mixing the dough with a spoon in a figure-eight motion: this way the mixture loses a minimum of air. Add the milk. Bake for 1 hour in a greased form at 160 degrees Celsius.
At this time, we rest. The cream is resting in the refrigerator. Cold cream is easier to whip.
We take the cake out of the oven and let it cool in the form for 15 minutes. We take it out of the mold and leave it to cool again. In the meantime, cut the strawberries into slices and whip the cream. Add the strawberry jam to the whipped cream and mix slightly. Only slightly, so that the cream shows beautiful streaks of jam rather than becoming a solid pink mass.
Then everything is intuitively clear: cut the cake lengthwise into three parts, put half the cream and half the strawberries on the first layer. Put in the second layer, lay out the remaining cream and strawberries. Cover with a third layer and sprinkle with a spoonful of powdered sugar. We decorate (if we have time before it is eaten) with fresh strawberries.
\hypertarget{cake-with-ricotta-and-pears}{%
\section{Cake with ricotta and pears}\label{cake-with-ricotta-and-pears}}
\begin{itemize}
\tightlist
\item
4-5 pears
\item
  2 tsp dark sugar
\item
140 g butter
\item
100 g sugar
\item
1 egg
\item
  140 g ricotta
\item
zest of 1 lemon
\item
200 g flour (can be less)
\item
10 g baking powder
\end{itemize}
Line a springform pan with paper, grease it with butter and sprinkle with the dark sugar. Then lay the pear slices on top.
Beat butter and sugar until white, add egg, ricotta and zest.
Sift in the flour (do not add all the flour at once - use only as much as the dough takes), baking powder and vanilla sugar, and mix everything until smooth.
Pour the dough over the pears and put them in the oven for 50-60 minutes at 180C.
\hypertarget{cottage-cheese-cake-with-raspberries-or-apricots}{%
\section{Cottage cheese cake with raspberries or apricots}\label{cottage-cheese-cake-with-raspberries-or-apricots}}
\begin{itemize}
\tightlist
\item
2 eggs
\item
75 g sugar
\item
Lemon zest
\item
200 g cottage cheese
\item
40 g starch
\item
1 tablespoon ground almonds
\item
200 g raspberries (can be frozen)
\end{itemize}
If the raspberries are frozen, take them out of the freezer when you start making the pie.
Beat eggs with sugar for several minutes until a fluffy light mass is obtained.
Add finely grated lemon zest.
Rub the cottage cheese through a sieve, mix it with the beaten eggs and add the starch.
Place the dough in a dish that has been greased and dusted with ground almonds or flour, and spread the raspberries on top. Bake for 45-50 minutes at 180C.
\hypertarget{rudolphs-cookies-for-christmas-and-new-year}{%
\section{Rudolph's cookies for Christmas and New Year}\label{rudolphs-cookies-for-christmas-and-new-year}}
\begin{itemize}
\tightlist
\item
280 g flour
\item
100 g nut flour
\item
130 g sugar
\item
A bit of salt
\item
200 g cold butter
\item
2 egg yolks
\item
Vanilla
\item
1 tbsp of lemon juice
\item
Lemon zest
\end{itemize}
Mix everything together into a crumbly dough. Make a ball, split it into several parts and put in the fridge for 30 min.
Form the biscuits and bake at 175C for 12-13 min.
\hypertarget{swedish-chocolate-cake}{%
\section{Swedish chocolate cake}\label{swedish-chocolate-cake}}
\begin{itemize}
\tightlist
\item
135 g butter
\item
50 g cacao
\item
180 g sugar
\item
110 g flour
\item
3 eggs
\end{itemize}
Melt the butter, then stir in the remaining ingredients one by one.
Put in a baking form and bake at 180C until the first cracks appear on the surface.
\hypertarget{scones}{%
\section{Scones}\label{scones}}
For 20 scones:
\begin{itemize}
\tightlist
\item
  610 g flour T45
\item
  610 g flour T55
\item
  60 g baking powder
\item
  280 g powdered sugar
\item
  4 g salt
\item
  60 g trimoline (inverted sugar)
\item
  280 g butter
\item
  400 g milk
\item
  240 g buttermilk
\item
  120 g white raisins
\end{itemize}
Mix all the dry ingredients with the butter in a stand mixer fitted with the paddle attachment.
Switch to the dough hook once the butter has been worked into the dry ingredients. Add the milk and buttermilk, mix for 3 min on speed 1, then 2 min on speed 2.
Split the dough into two
\hypertarget{apple-and-cinnamon-rolls}{%
\section{Apple and cinnamon rolls}\label{apple-and-cinnamon-rolls}}
\begin{itemize}
\tightlist
\item
240+50 g flour
\item
65 g warm milk
\item
40 g butter
\item
30 g sugar
\item
1 egg yolk
\item
1/2 sachet of yeast (or 10g of fresh yeast)
\item
60 g water
\end{itemize}
For the filling:
\begin{itemize}
\tightlist
\item
  3 apples
\item
  60 g sugar
\item
  50 g butter
\item
  50 g blond raisins
\item
  1 tsp ground cinnamon
\end{itemize}
For the cream:
\begin{itemize}
\tightlist
\item
  100 g sugar
\item
  100 g Philadelphia
\end{itemize}
In the bowl of a stand mixer (e.g., KitchenAid) mix the yeast, water and warm milk, then add 240 g of flour, the sugar, salt and egg yolk and knead for 10 minutes. Add the butter and knead for another 5 minutes. Cover the dough with cling film and put it to rest for 2 hours.
Peel the apples, cut into slices and put in a saucepan with the butter, then add the sugar, raisins and cinnamon. Cook for 15 minutes, stirring often. Let it cool down.
Roll the dough into a rectangle shape on a dusted surface, spread the apple filling, roll it and cut into 9 buns. Cover a baking dish (25cm) with a baking paper and place all buns next to each other. Leave it rest for 1 hour.
Heat the oven up to 180C. Place buns there for 25 minutes.
Prepare the cream: whip the cheese with the sugar and set it aside.
Once the buns have cooled a little, spread the cream all over them. Enjoy!
\hypertarget{main-dishes}{%
\chapter{Main dishes}\label{main-dishes}}
\hypertarget{broccoli-baked-in-cream-with-cheese}{%
\section{Broccoli baked in cream with cheese}\label{broccoli-baked-in-cream-with-cheese}}
\begin{itemize}
\tightlist
\item
1 head of broccoli
\item
1 egg
\item
100 ml cream 10\%
\item
nutmeg (on the tip of a knife)
\item
a handful of cheese
\end{itemize}
Divide the broccoli into florets and boil in boiling water for 3 minutes. Not more! This is enough for the initial heat treatment, while the broccoli remains crispy and tasty. Combine the egg, cream and nutmeg.
Put broccoli in a mold, pour over the filling, sprinkle with grated cheese on top.
Bake in the oven for 15 minutes at 170C.
\hypertarget{adjarian-khachapuri-bread-with-cheese---acharuli}{%
\section{Adjarian Khachapuri (bread with cheese) - Acharuli}\label{adjarian-khachapuri-bread-with-cheese---acharuli}}
For 2-4 boats (depending on size)
Ingredients:
\begin{itemize}
\tightlist
\item
380-400 gr flour
\item
200 ml warm water
\item
100 ml milk
\item
40 ml vegetable oil
\item
1 tsp dry yeast
\item
1 tsp salt
\item
2 tsp sugar
\end{itemize}
For the filling:
\begin{itemize}
\tightlist
\item
  1 egg
\item
  200 g suluguni (mozzarella)
\item
  400 g Imeretian cheese (feta + cheddar/parmesan)
\item
  50-70 ml milk or water
\item
  1.5 tbsp flour
\item
  2-4 yolks (depending on the number of boats)
\item
  50 g butter (optional)
\end{itemize}
Preparation:
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\tightlist
\item
  Dissolve the yeast with the sugar in warm water and leave for 5 minutes. Add the milk, mix and gradually add 200 g of flour, kneading by hand or with a food processor until smooth, then gradually add the rest of the flour in the same way. (The dough should come together into a ball, but it will still be rather sticky.) Add the vegetable oil in parts and continue to knead. (The dough will become more obedient and smooth, only slightly sticking to the fingers.) Then cover the dough with cling film or a towel, put it in a warm place and let it at least double in size. (You can also put it in the refrigerator overnight if you are not going to bake everything at once.)
\item
  Grate the Imeretian cheese (you can replace it with Adyghe or vats, or at least feta). Add one egg, the milk (or water), the flour and salt if necessary (if the cheese is not salted at all), and mix everything into a homogeneous mass; it should not be dry, but like porridge.
\item
  Separately grate the suluguni on a coarse grater (it can be replaced with mozzarella).
\item
  Divide the risen dough into 2-4 parts, depending on the size of the boats (khachapuri), form balls and leave for 10 minutes, covered with cling film or a towel. The dough will again become soft, pliable and fluffier. Dust your hands and the work surface with flour to work comfortably with the dough.
\item
  I recommend forming the boats directly on the parchment to make it easier to transfer them to a baking sheet.
\end{enumerate}
Forming No.~1:
With your fingers open the ball into a flat round, pressing only in the center so that the edges remain thick and the center is thin. Then take the round in your hands and rotate it, stretching the dough (as for a pizza). Put it back on the work surface, stretch the circle into a slight oval, bring two edges of the dough to the middle and simply pull the other two edges, lengthening the resulting shape, then pinch them together. You get the shape of a ``canoe'' in which the edges are stuck together. Then open up the center with your fingers, stretching the dough in width and forming a boat with sides. Put the cheese mass in the center of the boat, spread it out and pour the grated suluguni on top.
Forming No.~2
Open the ball into an even flat round with your fingers, pick it up and stretch it slightly in all directions to make the middle thinner. Put the cheese mass on the round and spread it over the entire surface, leaving only the edges free. Fold the sides of the round over, catching some of the cheese (as if you were rolling it up), and glue only the ends together, creating the tips of the boat. Pour grated suluguni over the exposed cheese mass.
\begin{enumerate}
\def\labelenumi{\arabic{enumi}.}
\setcounter{enumi}{5}
\tightlist
\item
  Transfer the formed boats on the baking sheet to the preheated oven (220 degrees) and bake for 15 minutes, until golden brown.
\item
  Remove the boats from the oven. Optionally, you can remove the excess dough from the khachapuri formed according to method No.~1: separate the cheese part from the dough with a fork, slide it under the side and, gently working your hand along, remove the dough. Push the cheese into the emptied sides, put the yolk on top, then return to the oven for 1-2 minutes. On the khachapuri formed according to method No.~2, simply put the yolk on top and return to the oven for 1-2 minutes. If desired, after the oven, coat the edges of the dough with butter and serve hot with a piece of butter next to the yolk.
\end{enumerate}
\hypertarget{baked-aubergines-with-mozzarella-cheese}{%
\section{Baked aubergines with mozzarella cheese}\label{baked-aubergines-with-mozzarella-cheese}}
2 servings
\begin{itemize}
\tightlist
\item
1 medium aubergines
\item
1 scoop of mozzarella
\item
50 g parmesan
\item
5-6 medium tomatoes (or a can of chopped tomatoes)
\item
1 clove of garlic
\item
several sprigs of basil
\item
olive oil
\item
flour
\item
salt
\item
black pepper
\end{itemize}
Cut the aubergines into circles that are not too thick (about 0.5 cm), salt them lightly and place in a colander for about half an hour: during this time, as they say, the excessively bitter juices will come out of them. Also slice the mozzarella thinly and grate the Parmesan cheese. Peel the tomatoes and cut into small pieces. Fry the chopped garlic and basil stalks in a little olive oil, add the chopped tomatoes and basil leaves, season with salt and pepper and simmer for 10-15 minutes over medium heat until the sauce thickens. Remove from heat and strain through a sieve or grind in a blender.
Rinse the aubergines, dry quickly, roll in flour and lightly fry in olive oil (aubergines tend to absorb oil like a sponge, so add some oil if necessary before loading the pan with the next portion). Transfer the fried aubergines to napkins to absorb the excess oil. Everything is ready to assemble our ``tower'': take a baking dish (best glass or ceramic), put a spoonful of tomato sauce on the bottom, place a slice of eggplant on top, put a slice of mozzarella on top and sprinkle with Parmesan. The next ``floor'' is tomato sauce, eggplant, cheese, and so on until you build as many towers as you plan to make eaters happy with this delicious dish. Bake the aubergines in an oven preheated to 220 degrees until the cheese melts on top into a golden crust (this will take 15-20 minutes), and serve immediately.
\hypertarget{crispy-greek-style-pie}{%
\section{Crispy Greek-style pie}\label{crispy-greek-style-pie}}
\begin{itemize}
\tightlist
\item
200g bag spinach leaves
\item
175g jar sundried tomato in oil
\item
100g feta cheese, crumbled
\item
2 eggs
\item
0.5 250g pack filo pastry
\end{itemize}
Put the spinach into a large pan. Pour over a couple tbsp water, then cook until just wilted. Tip into a sieve, leave to cool a little, then squeeze out any excess water and roughly chop. Roughly chop the tomatoes and put into a bowl along with the spinach, feta and eggs. Mix well.
Carefully unroll the filo pastry. Cover with some damp sheets of kitchen paper to stop it drying out. Take a sheet of pastry and brush liberally with some of the sundried tomato oil. Drape oil-side down in a 22cm loosebottomed cake tin so that some of the pastry hangs over the side. Brush oil on another piece of pastry and place in the tin, just a little further round. Keep placing the pastry pieces in the tin until you have roughly three layers, then spoon over the filling. Pull the sides into the middle, scrunch up and make sure the filling is covered. Brush with a little more oil.
Heat oven to 180C/fan 160C/gas 4. Cook the pie for 30 mins until the pastry is crisp and golden brown. Remove from the cake tin, slice into wedges and serve with salad.
\hypertarget{lamb-stuffed-aubergines-with-moorish-spices-and-manchego-cheese}{%
\section{Lamb-stuffed aubergines with Moorish spices and Manchego cheese}\label{lamb-stuffed-aubergines-with-moorish-spices-and-manchego-cheese}}
\begin{itemize}
\tightlist
\item
4 aubergines
\item
6 tbsp olive oil
\item
1 onion, chopped
\item
4 garlic cloves, finely chopped
\item
1 large red pepper, seeds removed, chopped
\item
1½ tsp freshly ground cumin seeds
\item
1 tsp ground cinnamon
\item
½ tsp freshly grated nutmeg
\item
1 tsp pimentón dulce (smoked sweet Spanish paprika)
\item
large pinch of crushed dried chillies
\item
500g/1lb 2oz lamb mince
\item
6 tbsp tomato sauce (see Top recipe tip below)
\item
100g/3½oz Manchego cheese, coarsely grated
\item
salt and freshly ground black pepper
\end{itemize}
Preheat the oven to 200C/400F/Gas 6.
Cut each aubergine lengthways through the stalk, then score the flesh in a tight criss-cross pattern, taking the knife through the flesh down to the skin, but taking care not to cut through the skin. Place them side by side on a baking tray and drizzle each half with half a tablespoon of the oil, season with salt and bake for 30-40 minutes or until the flesh is soft and tender but not browned.
Meanwhile, heat the remaining two tablespoons of oil in a large non-stick frying pan. Add the onion, garlic, red pepper and spices and fry gently for 10 minutes. Add the lamb mince and fry for 3--4 minutes or until all the meat is lightly browned. Stir in the tomato sauce and simmer for five minutes.
Remove the aubergines from the oven and increase the temperature to 220C/425F/Gas 7. Carefully scoop most of the flesh out of the baked aubergine halves, leaving the skins with a layer of flesh about 1cm/½in thick. Stir the scooped-out flesh into the lamb mixture with half a teaspoon of salt and some pepper to taste. Spoon the mixture into each aubergine shell and sprinkle with the grated cheese. Bake in the oven for 8--10 minutes, or until the cheese is bubbling and golden-brown.
\hypertarget{simple-baked-lasagne}{%
\section{Simple baked lasagne}\label{simple-baked-lasagne}}
\begin{itemize}
\item
2 carrots , peeled
\item
2 onions , peeled
\item
2 cloves of garlic , peeled
\item
2 sticks of celery , trimmed
\item
olive oil
\item
2 rashers of higher-welfare smoked streaky bacon
\item
½ a bunch of fresh thyme
\item
500 g quality beef mince
\item
a good splash of red wine
\item
1 gluten-free beef stock cube , preferably organic
\item
2 x 400 g tins of plum tomatoes
\item
sea salt
\item
freshly ground black pepper
\item
1 x gluten-free pasta dough
\item
3 anchovy fillets , from sustainable sources
\item
500 ml crème fraîche
\item
2 handfuls of freshly grated Parmesan cheese
\item
milk , optional
\end{itemize}
To make the Bolognese sauce, finely chop the carrots, onions, garlic and celery and add to a large, wide pan over a medium heat with a drizzle of olive oil. Roughly chop and add the bacon, then pick in the thyme leaves and cook for 5 to 10 minutes, or until softened and lightly golden.
Turn the heat up slightly, then stir in the beef mince, breaking it up with a wooden spoon. Cook for around 5 minutes, or until browned all over. Add the wine and crumble in the stock cube, stirring continuously until the liquid has completely reduced. Stir in the tomatoes and 1 tin's worth of water and bring to the boil. Reduce to a simmer, cover and cook for around 1 hour, then remove the lid and continue cooking for 30 minutes, or until thickened and reduced.
Meanwhile, preheat the oven to 180ºC/350ºF/gas 4.
For the white sauce, finely chop the anchovies, then mix with the crème fraîche and a handful of Parmesan, then season with salt and pepper -- you may need to loosen the mixture with a little milk.
Cut the sheets of pasta into rectangles (roughly 10cm x 15cm).
Spoon one-third of the Bolognese sauce into an ovenproof dish (roughly 25cm x 30cm). Layer over one-third of the lasagne sheets and top with one-third of the béchamel sauce. Repeat with the remaining ingredients until you have three layers in total, finishing with a final layer of béchamel. Grate over the remaining Parmesan and drizzle with olive oil, then cover with tin foil. Place in the hot oven for around 20 minutes, remove the foil and continue cooking for around 30 minutes, or until golden and bubbling. Serve with a nice, crisp salad.
\hypertarget{kung-pao-chicken}{%
\section{Kung Pao chicken}\label{kung-pao-chicken}}
\begin{itemize}
\tightlist
\item
1 tablespoon Szechuan peppercorns
\item
2½ tablespoons cornflour
\item
4 skinless higher-welfare chicken thighs , (350g)
\item
groundnut oil , or vegetable oil
\item
4 cloves of garlic
\item
5 cm piece of ginger
\item
2 spring onions
\item
6 dried red chillies
\item
2 tablespoons low-salt soy sauce
\item
½ tablespoon rice wine vinegar
\item
1 heaped tablespoon runny honey
\item
50 g unsalted peanuts
\item
1 punnet of salad cress
\end{itemize}
Toast the Szechuan peppercorns in a dry frying pan until lightly golden. Transfer to a pestle and mortar, grind to a fine powder, then sieve into a large bowl, discarding any large, tough bits.
Add 2 tablespoons of cornflour and stir to combine. Chop the chicken into bite-sized chunks, then toss in the cornflour mixture to coat.
Pour 2cm of oil into a large non-stick frying pan over a medium heat, add the chicken and fry for 7 to 8 minutes, or until golden and cooked through.
Meanwhile, peel and finely slice the garlic and ginger, then trim and finely slice the spring onions.
Using a slotted spoon, remove the chicken to a double layer of kitchen paper to drain. Carefully remove and discard most of the oil, leaving about 2 tablespoons in the pan, then return to a medium heat.
Add the garlic and ginger and fry for 2 minutes, or until lightly golden, then stir in the spring onions and whole chillies and fry for 1 further minute.
Meanwhile, combine ½ tablespoon of cornflour and 2 tablespoons of water. Mix in the soy, vinegar and honey, then pour the mixture into the pan. Bring to the boil and simmer for a few minutes, or until slightly thickened.
Lightly bash and add the peanuts, stir in the chicken, then toss well until warmed through. Snip the cress over the ribbon salad, scatter the reserved coriander leaves over the chicken, then serve.
\hypertarget{turkey-con-chilli}{%
\section{Turkey con chilli}\label{turkey-con-chilli}}
\begin{itemize}
\tightlist
\item
olive oil
\item
2 red onions , peeled and roughly chopped
\item
1 carrot , peeled and roughly chopped
\item
1 leek , trimmed and roughly chopped
\item
1 red pepper , deseeded and roughly chopped
\item
1 yellow pepper , deseeded and roughly chopped
\item
1 fresh red chilli , deseeded and finely chopped
\item
1 fresh green chilli , deseeded and finely chopped
\item
1 bunch fresh coriander , stalks finely chopped, leaves picked
\item
1 teaspoon ground cumin
\item
1 heaped teaspoon smoked paprika
\item
1 heaped teaspoon runny honey , optional
\item
3 tablespoons white wine vinegar , optional
\item
600 g turkey , leftover, shredded
\item
sea salt
\item
freshly ground black pepper
\item
3 x 400 g tinned chopped tomatoes
\item
400 g tinned butter beans or chickpeas , drained
\item
2 limes , juice of
\item
  soured cream , to serve
\item
  FOR THE GUACAMOLE
\item
2 ripe avocados , peeled and destoned
\item
2 tomatoes , halved
\item
¼ red onion , peeled
\item
½ clove garlic , peeled
\item
1 fresh green chilli , deseeded
\item
1 bunch fresh coriander
\item
1 lime
\end{itemize}
Preheat the oven to 180ºC/350ºF/gas 4. Heat a few lugs of olive oil in a large casserole-type pan on a medium heat. Add the onions, carrot, leek, peppers and chillies, and cook, stirring occasionally, for about 5 minutes. Add the coriander stalks, cumin and paprika, and cook for another 10 minutes or so, stirring frequently until soft and delicious. Sometimes I like to add some honey and white wine vinegar at this point and let it cook for a couple of minutes. I find this adds a wonderful sheen and enhances the natural sweetness of the vegetables.
While that's happening, shred the turkey meat off your carcass and roughly chop it. Add a good pinch of salt and pepper to the pan of vegetables, then add the turkey and take the pan off the heat. Add the tomatoes and chickpeas or butter beans and stir everything together. Pop it in the hot oven to blip away for 2 hours. Check on it after an hour, and add a splash of water if it looks a bit dry.
While that's cooking, make your guacamole by blitzing one of your avocados in a food processor with the tomatoes, onion, garlic, chilli and coriander. Use a fork to mash the other avocado in a bowl so it's nice and chunky. Taste the mixture in the food processor and add salt and squeezes of lime juice until the taste is just right for you. Whiz up one more time then tip into the bowl with your chunky avocado and mix together.
Take the chilli out of the oven and scrape all the gnarly bits from the edge of the pan back into the chilli for added flavour. Squeeze in some lime juice, and stir through most of the coriander leaves. Have a taste to check the seasoning then serve with steamed basmati rice or tortillas, and a good dollop of soured cream and guacamole on top. Scatter over your remaining coriander leaves and some finely sliced fresh chilli if you fancy then get everyone to tuck in.
\hypertarget{prawn-chorizo-orzo}{%
\section{Prawn \& chorizo orzo}\label{prawn-chorizo-orzo}}
\begin{itemize}
\tightlist
\item
2 cloves of garlic
\item
200 g quality chorizo
\item
  ½ a bunch of fresh basil (15g)
\item
4 tablespoons olive oil
\item
2 tablespoons sherry vinegar
\item
400 ml passata
\item
300 g orzo
\item
200 g cherry tomatoes, on the vine
\item
  400 g large cooked peeled king prawns , from sustainable sources
\end{itemize}
Preheat the oven to 180ºC/350ºF/gas 4.
Peel and finely chop the garlic, and slice the chorizo. Pick and finely chop the basil.
Heat half the oil in a heavy-based pan. Fry the garlic and chorizo for 3 minutes, then deglaze the pan with the vinegar.
Add the passata and 300ml of water, then the orzo. Bring to the boil, reduce the heat and simmer for 10 to 15 minutes, or until the orzo is al dente, stirring occasionally to prevent it sticking.
Spread the cherry tomatoes over a baking tray, drizzle with the rest of the oil and season. Roast for 10 minutes, or until soft.
Stir half the basil into the pasta, along with the prawns. Divide between bowls, top with the remaining basil and serve the roasted tomatoes alongside.
\hypertarget{chicken-kiev}{%
\section{Chicken kiev}\label{chicken-kiev}}
\begin{itemize}
\tightlist
\item
4 rashers of smoked streaky bacon
\item
olive oil
\item
4 x 150 g skinless chicken breasts , (I got mine from the butcher with the bone in, but either way is fine)
\item
3 tablespoons plain flour
\item
2 large free-range eggs
\item
150 g fresh breadcrumbs
\item
sunflower oil
\item
2 large handfuls of baby spinach , or rocket
\item
2 lemons
\item
BUTTER
\item
4 cloves of garlic
\item
½ a bunch of fresh flat-leaf parsley (15g)
\item
4 knobs of unsalted butter , (at room temperature)
\item
1 pinch of cayenne pepper
\end{itemize}
Fry the bacon in a pan on a medium heat with a tiny drizzle of olive oil, until golden and crisp, then remove.
For the butter, peel the garlic, then finely chop with the parsley leaves and mix into the softened butter with the cayenne. Firm up in the fridge.
Working one-by-one on a board, stuff the chicken breasts. To do this, start by pulling back the loose fillet on the back of the breast -- put your knife in the opposite direction and slice to create a long pocket (for extra guidance, watch the how-to video below).
Open the pocket up with your fingers, cut the chilled butter into four and push one piece into the pocket, then crumble in a rasher of crispy bacon. Fold and seal back the chicken, completely covering the butter and giving you a nice neat parcel. Repeat with the 3 remaining breasts.
Preheat the oven to 180°C/350°F/gas 4.
Place the flour in one shallow bowl, whisk the eggs in another, and put the breadcrumbs and a pinch of seasoning into a third. Evenly coat each chicken breast in flour, then beaten egg, letting any excess drip off, and finally, turn them in the breadcrumbs, patting them on until evenly coated.
Shallow-fry in 2cm of sunflower oil on a medium to high heat for a couple of minutes on each side, or until lightly golden, then transfer to a tray and bake in the oven for 10 minutes, or until cooked through. You can bake them completely in the oven and skip the frying altogether, you just need to drizzle them with olive oil and bake for about 20 minutes -- they won't be as golden, but they'll be just as delicious.
Meanwhile, peel and roughly chop the potatoes and cook in a large pan of boiling salted water for 12 to 15 minutes, or until tender.
Chop up the broccoli and add it to the potatoes for the last 8 minutes. Drain and leave to steam dry, then return to the pan and mash with a knob of butter and a pinch of salt and pepper.
Divide the mash between your plates and place a Kiev on top of each portion. Lightly dress the spinach leaves or rocket in a little oil and lemon juice, then sprinkle over the top as a salady garnish. Serve with a wedge of lemon on the side.
\hypertarget{farfalle-with-carbonara-and-spring-peas}{%
\section{Farfalle with carbonara and spring peas}\label{farfalle-with-carbonara-and-spring-peas}}
\begin{itemize}
\tightlist
\item
455 g farfalle
\item
1 free-range egg
\item
100 ml double cream
\item
12 rashers of higher-welfare pancetta or smoked streaky bacon
\item
3 handfuls of fresh podded or frozen peas
\item
2 sprigs of fresh mint
\item
2 handfuls of Parmesan cheese
\end{itemize}
First of all, bring a large pan of salted water to the boil, add the farfalle, and cook according to the packet instructions.
Whisk the egg in a bowl with the cream and season with sea salt and black pepper.
Roughly slice your pancetta or bacon and put it into a second pan over a medium heat and cook until crispy and golden.
When the farfalle is nearly cooked, add the peas for the last minute and a half. This way they will burst in your mouth and be lovely and sweet. When cooked, drain in a colander, saving a little of the cooking water.
Add the pasta to the pancetta. Pick and finely slice the mint leaves and stir in most of them. If the pan isn't big enough, mix it all together in a large warmed bowl.
Now you need to add the egg and cream mix to the pasta. What's important here is that you add it while the pasta is still hot. This way, the residual heat of the pasta will cook the eggs, but not so that they resemble scrambled eggs, as I've seen in some dodgy old restaurants on the motorway! The pasta will actually cook the egg enough to give you a silky smooth sauce. Toss together and loosen with a little of the reserved cooking water if necessary.
Season with salt and pepper, grate over the Parmesan and sprinkle over the rest of the mint leaves, and serve as soon as possible.
\hypertarget{taiwanese-3-cup-chicken}{%
\section{Taiwanese 3 Cup Chicken}\label{taiwanese-3-cup-chicken}}
\begin{itemize}
\tightlist
\item
1 - 1 1/2 lb. chicken drumettes
\item
15 - 20 cloves of garlic, peeled
\item
1 small piece of ginger, sliced
\item
fresh Thai basil leaves (red basil)
\end{itemize}
For the Sauce:
\begin{itemize}
\tightlist
\item
  1/3 cup of soy sauce (low sodium)
\item
  1/3 cup rice wine
\item
  1/3 cup of Asian sesame oil
\item
  3 Tbsp. cane sugar
\item
  1 tsp. dried chili flakes (or fresh red chilis)
\item
  1/2 tsp. salt
\end{itemize}
Brown the chicken for a few minutes first before adding the sauce mixture.
Cook the sauce mixture and chicken for 15 - 20 min. or until almost done.
When almost done, add the fresh basil leaves and stir fry, then cover with a lid for 1 min.
Transfer to a serving bowl and serve hot.
\hypertarget{quiche-with-salmon-brocolli-and-blue-cheese}{%
\section{Quiche with salmon, broccoli and blue cheese}\label{quiche-with-salmon-brocolli-and-blue-cheese}}
\begin{itemize}
\item
175 g flour
\item
100g butter
\item
2 tbsp cold water
\item
1 egg yolk
\item
Salt
\item
300 g salmon
\item
  200 g broccoli
\item
70 g blue cheese/ gorgonzola
\item
3 eggs
\item
  250 g sour cream / crème fraîche or double cream
\item
Salt, pepper, herbs, thyme
\end{itemize}
10-15 minutes before preparing the dough, put water and butter in the freezer.
Grate the butter on a coarse grater, stir it into the flour and add the salt, water and yolk. Quickly knead the dough just until it comes together into a ball. Wrap it in cling film and chill in the fridge for 30-40 minutes.
Cover the bottom of the mold with baking paper, roll out the dough between two sheets of parchment to a thickness of 3 mm, put in a mold with a diameter of 18-20 cm, make sides 4-5 cm high, prick with a fork, put in the freezer for 10 minutes.
Put a sheet of baking paper on the dough and load it with either dry rice or peas (to create a heavy weight to keep dough in place). Bake at 200C for 15 minutes with weight and 10 minutes without, until the base is lightly browned.
For the filling: mix the eggs, sour cream, salt, pepper and herbs, and crumble in the cheese. Divide the broccoli into florets and cut the fish into pieces.
When the base is ready, arrange the salmon and broccoli in it, pour the egg mixture on top and sprinkle with a little extra cheese. Bake at 200C for 40-50 minutes. Let it cool for 15-20 minutes before cutting.
\hypertarget{starters}{%
\chapter{Starters}\label{starters}}
\hypertarget{vegetable-tartlets}{%
\section{Vegetable tartlets}\label{vegetable-tartlets}}
\begin{itemize}
\tightlist
\item
  1 x 500 g block of ready-made puff pastry
\item
  plain flour , for dusting
\item
  4 teaspoons pesto
\item
  1 handful of mixed, ripe cherry tomatoes
\item
  8 asparagus spears
\item
  4 baby courgettes
\item
  2-3 jarred roasted peppers
\item
  ½ a bunch of fresh basil
\item
  olive oil
\item
  8 black olives , optional
\item
  1 x 100 g ball mozzarella
\item
  20 g Parmesan cheese , optional
\end{itemize}
Turn on the oven to 200ºC/gas 6. Carefully cut the pastry block in half with a table knife. Wrap the other half and refrigerate or freeze for later.
Dust some flour onto a clean work surface and, using a rolling pin, roll out the pastry into a square, measuring 26cm x 26cm. Cut into 4 equal squares.
Place the pastry squares on a baking tray, leaving a space between each.
Using the back of a spoon, spread the centre of each square with pesto, but don't spread it onto the edges.
Squash the tomatoes into a large mixing bowl, then snap the asparagus spears into 3cm pieces. Keep the lovely pointy tips and a little of the stalk, but discard the end 3cm.
Using a speed peeler, carefully shred the courgettes into ribbons. Tear the roasted peppers into strips and add to the bowl.
Pick the basil leaves, reserving the pretty ones for later. Place the large ones in the mixing bowl.
Mix the vegetables together in the bowl, adding a splash of oil. Pile a little of the mixture on each pesto-smeared tart and top with two olives (if using).
Break up the mozzarella and place little bits on top of each tart -- this will make it gooey like a pizza. Grate over some Parmesan (if using).
Bake for 15 to 20 minutes, until the pastry is golden and the cheese is all bubbly.
Once the tarts are ready, allow to cool slightly. Sprinkle with the reserved basil leaves and serve with a nice salad for lunch.
\hypertarget{desserts}{%
\chapter{Desserts}\label{desserts}}
\hypertarget{tiramisu}{%
\section{Tiramisu}\label{tiramisu}}
\hypertarget{cheesecake}{%
\section{Cheesecake}\label{cheesecake}}
\hypertarget{panna-cotta}{%
\section{Panna Cotta}\label{panna-cotta}}
\includegraphics{images/IMG_2480.jpg}
For 10 portions:
\begin{itemize}
\tightlist
\item
  1 l Double cream (33\%)
\item
  150 g sugar
\item
  6 sheets of gelatine
\item
  1 vanilla pod
\end{itemize}
\begin{itemize}
\tightlist
\item
300 g strawberries
\item
120 g sugar
\item
  juice of 1/2 lemon
\item
3 g pectin
\end{itemize}
Soak gelatin in cold water for a few minutes, squeeze. Slice the vanilla pod lengthwise and scrape out the seeds.
Place the cream, sugar, vanilla seeds and the pod in a saucepan. Dissolve the squeezed gelatine in the mixture, stirring occasionally. When it almost comes to a boil, turn off the heat and divide between jars through a sieve. Allow to cool and refrigerate for a couple of hours.
Place all the ingredients for the strawberry salsa in the pan and cook, stirring, until the sugar dissolves, and then until the strawberries soften. Place a few spoonfuls of warm salsa on top of the thickened panna cotta in jars. Refrigerate for 1 hour.
\hypertarget{special-occasions}{%
\chapter{Special occasions}\label{special-occasions}}
\hypertarget{roasted-duck}{%
\section{Roasted duck}\label{roasted-duck}}
\hypertarget{whole-baked-salmon}{%
\section{Whole baked salmon}\label{whole-baked-salmon}}
\hypertarget{breakfast-recipes}{%
\chapter{Breakfast recipes}\label{breakfast-recipes}}
\hypertarget{crepes}{%
\section{Crepes}\label{crepes}}
for two
\begin{itemize}
\tightlist
\item
2 eggs
\item
1 glass of water
\item
1 glass of milk
\item
1 cup self-rising flour
\item
2 tbsp sugar (optional)
\item
2 tbsp vegetable oil
\end{itemize}
Beat the eggs with the sugar (if using). Add a glass of hot water, whisking with a mixer, then the flour and a glass of warm milk.
Add the oil to the batter. Fry in a hot skillet without adding any extra oil.
\hypertarget{true-belgian-waffles}{%
\section{True Belgian Waffles}\label{true-belgian-waffles}}
\begin{itemize}
\tightlist
\item
2 cups all-purpose flour
\item
0.75 cup sugar
\item
3.5 teaspoons baking powder
\item
2 large eggs, separated
\item
1.5 cups whole milk
\item
1 cup butter, melted
\item
1 teaspoon vanilla extract
\item
Sliced fresh strawberries or syrup
\end{itemize}
In a bowl, combine flour, sugar and baking powder. In another bowl, lightly beat egg yolks. Add milk, butter and vanilla; mix well. Stir into dry ingredients just until combined. Beat egg whites until stiff peaks form; fold into batter.
Bake in a preheated waffle iron according to manufacturer's directions until golden brown. Serve with strawberries or syrup.
\hypertarget{gluten-free-pancakeswaffles}{%
\section{Gluten-free pancakes/waffles}\label{gluten-free-pancakeswaffles}}
\begin{itemize}
\tightlist
\item
  130 g yoghurt
\item
2 eggs
\item
4 tbsp flour
\item
0.5 tsp
\item
Salt
\end{itemize}
Mix everything together and cook either in a pan or in a waffle maker, without oil.
\hypertarget{cottage-cheese-pancakes}{%
\section{Cottage cheese pancakes}\label{cottage-cheese-pancakes}}
\begin{itemize}
\tightlist
\item
200-250 g cottage cheese
\item
1 egg
\item
4-5 tbsp semolina/flour
\item
vanilla
\end{itemize}
Mix everything together, form small balls (around 6-8) and bake until golden, or cook them in a pan on both sides over medium heat until golden.
\end{document}
| {"hexsha": "8f6cd0f30638f595c77229b090ac91022229ca47", "size": 50082, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "bookdown-demo.tex", "max_stars_repo_name": "AnkaS/recipes", "max_stars_repo_head_hexsha": "eed59420e131d453591797d1769ab8eb4b1bb267", "max_stars_repo_licenses": ["CC0-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "bookdown-demo.tex", "max_issues_repo_name": "AnkaS/recipes", "max_issues_repo_head_hexsha": "eed59420e131d453591797d1769ab8eb4b1bb267", "max_issues_repo_licenses": ["CC0-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "bookdown-demo.tex", "max_forks_repo_name": "AnkaS/recipes", "max_forks_repo_head_hexsha": "eed59420e131d453591797d1769ab8eb4b1bb267", "max_forks_repo_licenses": ["CC0-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.361038961, "max_line_length": 813, "alphanum_fraction": 0.7673415598, "num_tokens": 13762} |
\section{Task}
Build a speaker recognition system
| {"hexsha": "b561ee41e20150ec3e56300bbc7f237662811628", "size": 51, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/Midterm-Report/task.tex", "max_stars_repo_name": "juliia5m/knu_voice", "max_stars_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 717, "max_stars_repo_stars_event_min_datetime": "2015-01-03T15:25:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T12:45:45.000Z", "max_issues_repo_path": "doc/Midterm-Report/task.tex", "max_issues_repo_name": "juliia5m/knu_voice", "max_issues_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 91, "max_issues_repo_issues_event_min_datetime": "2015-03-19T09:25:23.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-19T08:51:26.000Z", "max_forks_repo_path": "doc/Midterm-Report/task.tex", "max_forks_repo_name": "juliia5m/knu_voice", "max_forks_repo_head_hexsha": "1f5d150ded23af4c152b8d20f1ab4ecec77b40e1", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 315, "max_forks_repo_forks_event_min_datetime": "2015-01-21T00:06:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T08:13:36.000Z", "avg_line_length": 17.0, "max_line_length": 35, "alphanum_fraction": 0.8039215686, "num_tokens": 12} |
# -*- coding: utf-8 -*-
"""
Created on Wed Dec 15 21:41:25 2021
@author: Oscar
"""
import math
import numpy as np
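# Regula falsi (false position): each iterate is the x-intercept of the secant
# line through (a, f(a)) and (b, f(b)),
#     x = (a*f(b) - b*f(a)) / (f(b) - f(a)),
# and the bracket [a, b] is shrunk by replacing the endpoint whose function
# value has the same sign as f(x), so a sign change stays enclosed.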
def regularFalsi(f, a, b, Tol, N):
i = 1
fa = f(a)
fb = f(b)
print("%-20s %-20s %-20s %-20s %-20s" %
("n","a_n","b_n","p_n","f(p_n)"))
while(i <= N):
sol = (a*f(b)-b*f(a))/(f(b)-f(a))
fp = f(sol)
if(fp==0 or np.abs(f(sol))<Tol):
break
else:
print("%-20.8g %-20.8g %-20.8g %-20.8g %-20.8g\n" % (i, a, b, sol, f(sol)))
i = i + 1
            # keep the root bracketed: replace the endpoint whose sign matches f(sol)
            if(fa*fp < 0):
                b = sol
                fb = fp
            else:
                a = sol
                fa = fp
    abs_error=(sol-2)/2  # relative error against the known root x = 2 of x**2+4*x-12
    print(abs_error)
    return sol
sol = 0
a =-1
b = 5
Tol = 1E-6
N = 1000
f = lambda x: x**2+4*x-12
print("Sample input: regulaFalsi(f, 1, 2, 10**-4, 100)")
approxi_phi = regularFalsi(f, a, b, Tol, N)
print(approxi_phi)
| {"hexsha": "74ffcfcca57c2af0a9c420e7f28082d41cc5dcbc", "size": 919, "ext": "py", "lang": "Python", "max_stars_repo_path": "False Position Code.py", "max_stars_repo_name": "oliver779/Computational_Methods_Course", "max_stars_repo_head_hexsha": "e3d96d97ae0b3acaa1b61474eb18b3fbbf8edc9c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "False Position Code.py", "max_issues_repo_name": "oliver779/Computational_Methods_Course", "max_issues_repo_head_hexsha": "e3d96d97ae0b3acaa1b61474eb18b3fbbf8edc9c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "False Position Code.py", "max_forks_repo_name": "oliver779/Computational_Methods_Course", "max_forks_repo_head_hexsha": "e3d96d97ae0b3acaa1b61474eb18b3fbbf8edc9c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.3720930233, "max_line_length": 88, "alphanum_fraction": 0.4450489663, "include": true, "reason": "import numpy", "num_tokens": 355} |
subroutine Bamp_mmp(q,mc,ms,Bmmp)
c--- u + g -> c + s + d (t-channel single-charm)
************************************************************************
* *
* AUTHORS: R. FREDERIX AND F. TRAMONTANO *
* DATE : 12/17/2008 *
* *
************************************************************************
implicit none
include 'constants.f'
include 'zprods_com.f'
include 'epinv.f'
include 'stopf1inc.f'
double precision q(mxpart,4),dot,cDs,gDs,cDg,mc,ms,
. mc2,ms2,qsq,s,t,u,xsn,xsd,xs
double complex trc,trg,trs,trsgc,zmmp,Bmmp
mc2=mc**2
ms2=ms**2
cDs=dot(q,3,4)+mc2*dot(q,4,2)/2d0/dot(q,3,2)
. +ms2*dot(q,3,2)/2d0/dot(q,4,2)
cDg=dot(q,3,2)
gDs=dot(q,4,2)
qsq=mc2+ms2+2d0*cDs+2d0*cDg+2d0*gDs
s=ms2+2d0*gDs
t=mc2+2d0*cDg
u=mc2+ms2+2d0*cDs
xsn=(1d0-dsqrt(1d0-4d0*ms*mc/(u-(ms-mc)**2)))
xsd=(1d0+dsqrt(1d0-4d0*ms*mc/(u-(ms-mc)**2)))
xs=-xsn/xsd
trg=2d0*za(5,2)*zb(2,1)
trs=2d0*za(5,4)*zb(4,1)+ms**2*za(5,2)*zb(2,1)/gDs
trc=2d0*za(5,3)*zb(3,1)+mc**2*za(5,2)*zb(2,1)/cDg
trsgc=2d0*zb(1,4)*za(4,2)*zb(2,3)*za(3,5)
zmmp=za(4,3)*zb(2,3)**2
Bmmp = ms2*(cDg**3*gDs*(mc2*ms2*(4*trs*gDs-2*trc*gDs+trsgc-ms2*tr
& g)+2*cDs**2*(-2*trs*gDs+2*trc*gDs-2*trsgc+ms2*trg)+ms2*cDs*(-6*
& trc*gDs+5*trsgc+mc2*trg))+cDg**2*gDs**2*(mc2*ms2*(4*trs*gDs-6*t
& rc*gDs+trsgc-4*ms2*trg+mc2*trg)+2*cDs**2*(-2*trs*gDs+6*trc*gDs-
& 4*trsgc+3*ms2*trg-mc2*trg)+cDs*(-6*ms2*trc*gDs+4*ms2*trsgc+mc2*
& trsgc+mc2*ms2*trg))+cDg*gDs**3*(cDs*(-2*ms2*trc*gDs+ms2*trsgc+2
& *mc2*trsgc+7*mc2*ms2*trg)+2*cDs**2*(6*trc*gDs-3*trsgc+ms2*trg-3
& *mc2*trg)+mc2*ms2*(-6*trc*gDs+trsgc-ms2*trg+4*mc2*trg)-8*trg*cD
& s**3)+gDs**4*(cDs**2*(4*trc*gDs-2*(trsgc+mc2*trg))+mc2*ms2*(-2*
& trc*gDs+trsgc+mc2*trg)-4*trg*cDs**3+mc2*(trsgc+3*ms2*trg)*cDs)+
& 2*ms2*cDg**4*cDs*(trsgc-trc*gDs))*tr5Xs/((cDs**2-mc2*ms2)*gDs**
& 3*(gDs+cDg))/2.0d+0
Bmmp = mc2*(2*mc2*trsgc*cDs*gDs**4+cDg**3*gDs*(cDs*(8*trs*gDs**2-
& 2*(trsgc+3*mc2*trs+2*ms2*trg)*gDs+2*ms2*trsgc+mc2*trsgc+7*mc2*m
& s2*trg)+ms2*(2*gDs*(-2*trs*gDs+2*trsgc+ms2*trg)+mc2*(-6*trs*gDs
& +trsgc+2*ms2*trg)+mc2**2*trg)-2*cDs**2*(-6*trs*gDs+3*trsgc+2*ms
& 2*trg)-8*trg*cDs**3)+cDg**2*gDs**2*(cDs*(4*trs*gDs**2-2*(2*trsg
& c+3*mc2*trs+ms2*trg)*gDs+ms2*trsgc+4*mc2*trsgc+5*mc2*ms2*trg)+m
& s2*(mc2*(-2*trs*gDs+3*trsgc+ms2*trg)+2*trsgc*gDs-2*mc2**2*trg)+
& 2*cDs**2*(2*trs*gDs-5*trsgc-ms2*trg+2*mc2*trg)-4*trg*cDs**3)+cD
& g**4*(cDs*(4*trs*gDs**2-2*(mc2*trs+ms2*trg)*gDs+ms2*(trsgc-mc2*
& trg))+ms2*(-8*trs*gDs**2+2*(trsgc-mc2*trs+2*ms2*trg)*gDs-mc2*(t
& rsgc+ms2*trg))+8*trs*cDs**2*gDs)+cDg*gDs**3*(cDs*(-2*trsgc*gDs-
& 2*mc2*trs*gDs+5*mc2*trsgc+mc2*ms2*trg)+2*(mc2*trg-2*trsgc)*cDs*
& *2+mc2*ms2*(trsgc-mc2*trg))+2*ms2*cDg**5*(trs*(mc2-2*gDs)+ms2*t
& rg))*tr5Xc/(cDg**2*(cDs**2-mc2*ms2)*gDs*(gDs+cDg))/2.0d+0+Bmmp
Bmmp = ms2*(trsgc*(cDs*gDs*(2*cDs*gDs-mc2*gDs+4*cDg*cDs)-ms2*(cDg
& *cDs*(gDs+2*cDg)+mc2*gDs*(gDs+cDg)))+trg*gDs*(-ms2*(mc2*(3*cDs+
& mc2)*gDs+cDg*cDs*(2*cDs+mc2))+2*cDs**2*(2*cDs+mc2)*gDs+mc2*ms2*
& *2*cDg)-2*trc*gDs*(gDs+cDg)*(2*cDs**2*gDs-ms2*(mc2*gDs+cDg*cDs)
& ))*tr4Xs/((cDs-mc*ms)*(cDs+mc*ms)*gDs**2)/2.0d+0+Bmmp
Bmmp = mc2*(trsgc*(-ms2*cDg*(cDg*(2*gDs+cDs)-mc2*(gDs+cDg))-cDs*g
& Ds*(mc2*(2*gDs+cDg)-2*cDg*(gDs+cDs)))-ms2*trg*cDg*(-cDg*(cDs*(2
& *gDs+mc2)+mc2*ms2)+mc2*(cDs+mc2)*gDs+2*ms2*cDg**2)+2*trs*cDg*(m
& s2*cDg-cDs*gDs)*(2*cDg*gDs-mc2*(gDs+cDg)))*tr4Xc/(cDg*(cDs-mc*m
& s)*(cDs+mc*ms)*gDs)/2.0d+0+Bmmp
Bmmp = (cDg**2*(mc2*ms2*(4*trs*gDs**2+2*(trsgc+mc2*trs+2*ms2*trg)
& *gDs+ms2*(mc2*trg-trsgc))+ms2*cDs*(mc2*(-2*trs*gDs+trsgc+ms2*tr
& g)-2*trsgc*gDs)-2*cDs**2*gDs*(4*trs*gDs+trsgc+ms2*trg))+cDg*gDs
& *(-mc2*ms2*(4*trg*gDs**2+2*mc2*(trg-trs)*gDs+mc2*(trsgc+ms2*trg
& ))+2*cDs**2*gDs*(2*trg*gDs+trsgc+mc2*trg)+mc2*ms2*(3*trsgc-mc2*
& trg)*cDs)-2*mc2**2*ms2*trsgc*gDs**2+cDg**3*(-2*ms2*cDs*(-2*trs*
& gDs+mc2*trs+ms2*trg)+4*trs*cDs**2*gDs-4*mc2*ms2*trs*gDs))*tr3Xs
& /(cDg*(cDs**2-mc2*ms2)*gDs)/2.0d+0+Bmmp
Bmmp = mc2*(trg*gDs*(-4*cDs**2*gDs**2+ms2**2*(mc2*gDs-cDg*(cDs+mc
& 2))+mc2*ms2*gDs*(4*gDs+cDs))-ms2*trsgc*(gDs*(mc2*gDs-cDs*(gDs+3
& *cDg))+ms2*cDg*(gDs+2*cDg))-2*ms2*trc*gDs*(gDs+cDg)*(cDs*gDs-ms
& 2*cDg))*tr3Xc/((cDs-mc*ms)*(cDs+mc*ms)*gDs**2)/2.0d+0+Bmmp
Bmmp = (-cDg**3*(cDs**2*(4*(3*trs+trc)*gDs**2+(-2*trsgc-5*ms2*trs
& -2*ms2*trg+2*ms2*trc)*gDs+ms2*(trsgc-ms2*trg))+mc2*ms2*(-4*(2*t
& rs+trc)*gDs**2+(2*trsgc+3*ms2*trs+4*ms2*trg)*gDs+ms2*(ms2*trg-t
& rsgc))+2*ms2*(trsgc-mc2*trs+ms2*trc)*cDs*gDs)+cDg**2*gDs*(cDs**
& 2*(-4*(trc-2*trs)*gDs**2+(6*trsgc+ms2*(3*trs+2*(trg+trc))+2*mc2
& *trg)*gDs+ms2*(trsgc+ms2*trs+mc2*trg))-mc2*ms2*(-4*(trc-2*trs)*
& gDs**2+(4*trsgc+2*mc2*(trs+trg)+3*ms2*trs+2*ms2*trg)*gDs+ms2*(t
& rsgc+ms2*trs+mc2*trg))-mc2*ms2*cDs*((2*trs+6*trg-2*trc)*gDs+4*t
& rsgc+ms2*trs)+cDs**3*(8*trg*gDs+4*trsgc+ms2*trs))-2*cDg*(cDs-mc
& *ms)*(cDs+mc*ms)*gDs**2*(trg*gDs*(2*gDs+mc2)+trsgc*(gDs+mc2)+ms
& 2*trs*(-gDs-cDs)+ms2**2*(trg+trc))+ms2*(trs+trg+trc)*(cDs-mc*ms
& )*(cDs+mc*ms)*gDs**3*(2*gDs+2*cDs+ms2-mc2)+4*ms2*trs*cDg**4*cDs
& *gDs)*lVs/(cDg**2*(cDs-mc*ms)*(cDs+mc*ms)*gDs**2)/4.0d+0+Bmmp
Bmmp = (cDg**2*gDs*(-mc2*ms2*(8*trs*gDs**3+2*(trsgc+ms2*trg)*gDs*
& *2+ms2*(4*trsgc-3*mc2*trs+2*mc2*trg-4*mc2*trc)*gDs+mc2*ms2*(trs
& gc-ms2*trs+ms2*trg))+cDs**2*(8*trs*gDs**3+2*(trsgc+ms2*trg)*gDs
& **2+ms2*(4*trsgc-5*mc2*trs+4*mc2*trg-2*mc2*trc)*gDs+mc2*ms2*(tr
& sgc-ms2*trs+ms2*trg))+cDs**3*(8*trs*gDs**2+2*(trsgc+ms2*trg)*gD
& s+4*ms2*trsgc-mc2*ms2*trs)+mc2*ms2*cDs*(-8*trs*gDs**2-2*mc2*trs
& *gDs-2*ms2*trg*gDs+2*ms2*trc*gDs-4*ms2*trsgc+mc2*ms2*trs))-2*cD
& g**3*(gDs*(mc2*ms2*(-2*trs*gDs**2-2*ms2*(2*trs*gDs+mc2*(trs+trg
& ))+ms2**2*(2*trg+trc))+cDs**2*(2*trs*gDs*(gDs+2*ms2)+mc2*ms2*(t
& rs+2*trg)-2*ms2**2*trg)+2*trs*cDs**3*gDs+mc2*ms2**2*(-trs+trg+t
& rc)*cDs)+ms2**2*trsgc*(mc2*(gDs-ms2)+cDs**2))-cDg*(cDs-mc*ms)*(
& cDs+mc*ms)*gDs**2*(mc2*(ms2*(2*trs*(gDs+cDs)+trsgc-2*ms2*(trg+t
& rc))+2*trg*gDs*(gDs+cDs))+2*gDs*(gDs+cDs)*(2*trg*gDs+trsgc)-mc2
& **2*ms2*trg)+mc2*ms2*(trs+trg+trc)*(mc*ms-cDs)*(cDs+mc*ms)*gDs*
& *3*(2*gDs+2*cDs+ms2-mc2)+4*mc2*ms2**2*trs*cDg**4*gDs)*lVc/(ms2*
& cDg**2*(cDs-mc*ms)*(cDs+mc*ms)*gDs**2)/4.0d+0+Bmmp
Bmmp = xs*cDs*(cDg**2*(4*trs*gDs**2-2*ms2*trg*gDs+ms2*trsgc)+mc2*
& trsgc*gDs**2-2*trsgc*cDg*gDs*(gDs+cDs))*lRcs/(mc*ms*(xs**2-1)*c
& Dg*gDs**2)+Bmmp
Bmmp = tr3c00fs*(gDs*(-mc2*(2*(2*trs+5*trg)*gDs**2+(2*trsgc+3*ms2
& *trs+8*ms2*trg)*gDs+ms2*(trsgc+ms2*trg))+cDs*(2*gDs+ms2)*(2*trc
& *gDs-mc2*trg)-trsgc*cDs*(4*gDs+ms2))+cDg*(cDs*(4*(trg+trc)*gDs*
& *2+2*(trsgc+ms2*(3*trs+5*trg+trc))*gDs+ms2*(trsgc+ms2*trg))+trs
& gc*(2*gDs**2+4*ms2*gDs+ms2**2)-(gDs+ms2)*(2*gDs+ms2)*(2*trc*gDs
& -mc2*trg-3*ms2*trc))-cDg**2*(4*(3*trs+2*trg+trc)*gDs**2-2*trsgc
& *gDs+2*ms2*(3*trs+trg+3*trc)*gDs-ms2*trsgc+ms2**2*(3*trs+2*(trg
& +trc))))/gDs**3+Bmmp
Bmmp = Bmmp-epinv*(mc*ms*(xs**2-1)-4*lp*xs*cDs)*(cDg**2*(4*trs*g
& Ds**2-2*ms2*trg*gDs+ms2*trsgc)+mc2*trsgc*gDs**2-2*trsgc*cDg*gDs
& *(gDs+cDs))/(mc*ms*(xs**2-1)*cDg*gDs**2)/4.0d+0
Bmmp = tr2fu*cDs*(cDg**2*(4*trs*gDs**2-2*ms2*trg*gDs+ms2*trsgc)+m
& c2*trsgc*gDs**2-2*trsgc*cDg*gDs*(gDs+cDs))/(cDg*gDs**2)+Bmmp
Bmmp = Bmmp-3*tr3c001fs*(-ms2*cDg*(4*trc*gDs**2+2*((trs+trg)*cDs
& +2*ms2*trc)*gDs+ms2**2*trc)+(trs+trg)*cDg**2*(4*gDs**2+2*ms2*gD
& s+ms2**2)+mc2*(trs+trg)*gDs**2*(2*gDs+ms2))/gDs**3
Bmmp = Bmmp-LsB2*(cDg*(cDs*(mc2*(-2*trs*gDs+trsgc+ms2*trg)-2*trs
& gc*gDs)+2*(mc2*trg-trsgc)*cDs**2+mc2*ms2*(trsgc-mc2*trg))+2*mc2
& *trsgc*cDs*gDs-2*cDg**2*cDs*(trs*(mc2-2*gDs)+ms2*trg))*(mc2*gDs
& **2-2*cDg*cDs*gDs+ms2*cDg**2)/(cDg**2*(cDs**2-mc2*ms2)*gDs)
Bmmp = LsB1*(gDs**3*(cDs**3*(8*trc*gDs-4*(trsgc+mc2*trg))+3*mc2*m
& s2*cDs*(-2*trc*gDs+trsgc+mc2*trg)-8*trg*cDs**4+8*mc2*ms2*trg*cD
& s**2+mc2**2*ms2*(trsgc-ms2*trg))+2*cDg*gDs**2*(cDs**3*(4*trc*gD
& s-2*trsgc+4*ms2*trg)-3*mc2*ms2*cDs*(trc*gDs+ms2*trg)+2*ms2*cDs*
& *2*(-2*trc*gDs+trsgc+mc2*trg)-mc2*ms2**2*(-2*trc*gDs+trsgc+mc2*
& trg))-ms2*cDg**2*gDs*(2*cDs**2*(4*trc*gDs-3*trsgc+ms2*trg)+ms2*
& cDs*(-2*trc*gDs+trsgc+mc2*trg)+mc2*ms2*(-4*trc*gDs+trsgc-ms2*tr
& g))+2*ms2**2*cDg**3*cDs*(trc*gDs-trsgc))/((cDs**2-mc2*ms2)*gDs*
& *3)+Bmmp
Bmmp = 3*tr3c002fs*(cDg*(trc*cDs*(2*gDs+ms2)**2-4*mc2*(trs+trg)*g
& Ds**2)-mc2*trc*gDs**2*(2*gDs+ms2))/gDs**3+Bmmp
Bmmp = (cDg**4*(144*trs*gDs**6+24*ms2*(11*trs+6*trc)*gDs**5+cDs*(
& 2*gDs+ms2)*(144*trs*gDs**4-40*ms2*(3*trg+2*trc)*gDs**3-2*ms2*(-
& 36*trsgc+19*ms2*trs+23*mc2*trs+52*ms2*trg+4*ms2*trc+2*mc2*trc)*
& gDs**2+2*ms2*(mc2*(-9*trsgc-8*ms2*trs-17*ms2*trg+7*ms2*trc)+ms2
& **2*(8*trc-9*trg))*gDs+mc2*ms2**2*(-9*trsgc-9*ms2*trg+8*ms2*trc
& ))+8*ms2*(15*trsgc+3*ms2*trs+mc2*(-25*trs+6*trg+10*trc)+21*ms2*
& trg+44*ms2*trc)*gDs**4+2*ms2*(6*ms2*(-10*trsgc-5*ms2*trs+6*ms2*
& trg+15*ms2*trc)-2*mc2**2*(10*trs+19*trg)+3*mc2*ms2*(-7*trs+32*t
& rg+48*trc))*gDs**3-ms2**2*(6*ms2*(21*trsgc+ms2*trs+2*ms2*trc)-m
& c2*(-72*trsgc+39*ms2*trs+76*ms2*trg+180*ms2*trc)+4*mc2**2*(5*tr
& s+23*trg))*gDs**2+72*trs*cDs**2*gDs**3*(2*gDs+ms2)-ms2**3*(mc2*
& (54*trsgc+ms2*(-8*trs+trg-12*trc))+2*ms2*(9*trsgc+8*ms2*trc)+45
& *mc2**2*trg)*gDs-mc2*ms2**4*(9*(trsgc+mc2*trg)+8*ms2*trc))+cDg*
& *3*gDs*(-144*trs*gDs**6-72*(trsgc-7*ms2*trs-mc2*trs+5*ms2*trg)*
& gDs**5-cDs*(2*gDs+ms2)*(144*trs*gDs**4+72*(trsgc-mc2*trs+ms2*(t
& rg+trc))*gDs**3+2*ms2*(9*(-6*trsgc+ms2*trs+2*ms2*trc)+mc2*(19*t
& rs+30*trg+20*trc))*gDs**2+ms2*(-18*ms2*trsgc+mc2*(-54*trsgc+19*
& ms2*trs+16*ms2*trg+4*ms2*trc)+mc2**2*(23*trs-18*trg))*gDs+mc2*m
& s2**2*(9*ms2*trg-9*mc2*trg-8*ms2*trc))+4*ms2*(3*trsgc+2*mc2*(64
& *trs+13*trg+9*trc)+57*ms2*trs-9*ms2*trg)*gDs**4+2*ms2*(mc2*(48*
& trsgc+ms2*(123*trs+182*trg+44*trc))+3*ms2*(4*trsgc+ms2*(-7*trs+
& 16*trg+4*trc))-5*mc2**2*(-3*trs-4*trg+2*trc))*gDs**3+ms2*(mc2**
& 2*(36*trsgc+ms2*(37*trs+142*trg-10*trc))+2*mc2*ms2*(15*trsgc+ms
& 2*(-4*trs+93*trg+23*trc))+6*ms2**3*(2*(trg+trc)-trs))*gDs**2-36
& *cDs**2*gDs**2*(2*gDs+ms2)*(2*trs*gDs+trsgc-mc2*trs+ms2*trg)+mc
& 2*ms2**2*(36*mc2*trsgc+ms2*(-27*trsgc+11*mc2*trs+61*mc2*trg)-3*
& ms2**2*(trs-6*trg+2*trc))*gDs+mc2*ms2**3*(-9*ms2*trsgc+9*mc2*tr
& sgc-8*ms2**2*trc))+cDg**2*gDs**2*(2*gDs+ms2)*(72*(trg-2*trs)*gD
& s**5+cDs*(144*(trg-2*trs)*gDs**4-72*(mc2*(trs-trg)+ms2*trg)*gDs
& **3-4*(mc2*(9*trsgc-5*ms2*trs+15*ms2*trg+6*ms2*trc)+3*ms2**2*(2
& *trs+trg+trc))*gDs**2-mc2*ms2*(-54*trsgc+9*ms2*(trs-2*trg+2*trc
& )+19*mc2*trs+18*mc2*trg)*gDs+9*mc2*ms2*(mc2*(trsgc+2*ms2*trg)+m
& s2*trsgc))-36*(mc2*(trs-trg)+5*ms2*trg)*gDs**4+2*(mc2*(ms2*(174
& *trs-113*trg+22*trc)-9*trsgc)-6*ms2*(6*trsgc+ms2*(2*trs+trg+trc
& )))*gDs**3+18*cDs**2*gDs*(4*(trg-2*trs)*gDs**2-2*(mc2*(trs-trg)
& +ms2*trg)*gDs-mc2*(trsgc+ms2*trg))+ms2*(-mc2*(60*trsgc+ms2*(3*t
& rs-22*trg+32*trc))+mc2**2*(145*trs-12*trg+16*trc)+6*ms2**2*(-tr
& s+trg+trc))*gDs**2+mc2*ms2*(18*ms2*trsgc+mc2*(ms2*(11*trs+57*tr
& g-22*trc)-9*trsgc)+18*mc2**2*trs+ms2**2*(6*(trg+trc)-3*trs))*gD
& s+9*mc2**2*ms2**2*(2*trsgc+(ms2+mc2)*trg))+cDg*gDs**3*(2*gDs+ms
& 2)*(72*trg*gDs**5+cDs*(144*trg*gDs**4+72*(trsgc+2*mc2*(trg-trs)
& )*gDs**3+12*(mc-ms)*(ms+mc)*(ms2*(trs+trg+trc)+3*mc2*trg)*gDs**
& 2+2*mc2*ms2*(-9*trsgc+mc2*(5*(trs-3*trg)+3*trc)-3*ms2*(2*trs+tr
& g+trc))*gDs+9*mc2**2*ms2*(ms2-mc2)*trg)+36*(trsgc+2*mc2*(trg-tr
& s))*gDs**4+6*(mc2*ms2*(2*trs-25*trg+2*trc)-2*ms2**2*(trs+trg+tr
& c)+3*mc2**2*trg)*gDs**3+18*cDs**2*gDs*(4*trg*gDs**2+2*(trsgc+2*
& mc2*(trg-trs))*gDs+mc2*(mc-ms)*(ms+mc)*trg)-2*ms2*(-3*mc2*(ms2*
& (trg+trc)-15*trsgc)-2*mc2**2*(29*trs-28*trg+8*trc)+3*ms2**2*(tr
& s+trg+trc))*gDs**2+mc2*ms2*(-mc2*(54*trsgc+ms2*(-6*trs+25*trg+1
& 6*trc))+mc2**2*(25*trs-22*trg+5*trc)+3*ms2**2*(-trs+trg+trc))*g
& Ds+9*mc2**2*ms2*(ms2-mc2)*trsgc)-ms2*cDg**5*(16*(16*trs+trg-18*
& trc)*gDs**4+4*(36*trsgc+ms2*(25*trs+4*trg-160*trc)+mc2*(trs+40*
& trg-18*trc))*gDs**3+2*cDs*(2*gDs+ms2)*(4*(9*trg+trc)*gDs**2+2*(
& 9*trsgc+ms2*(8*trs+26*trg-7*trc))*gDs+ms2*(9*trsgc+9*ms2*trg-8*
& ms2*trc))+2*(3*mc2*(6*trsgc+ms2*(-5*trs+38*trg-24*trc))+144*ms2
& *trsgc+ms2**2*(-17*trs+10*trg-180*trc))*gDs**2+36*ms2*(4*ms2+mc
& 2)*trsgc*gDs-2*ms2**2*(mc2*(16*trs-56*trg+45*trc)+ms2*(8*trs-tr
& g+12*trc))*gDs+9*ms2**2*(2*ms2+mc2)*trsgc+ms2**3*(mc2*(-8*trs+1
& 9*trg-18*trc)+16*ms2*trc))+mc2*gDs**4*(2*gDs+ms2)*(36*trg*gDs**
& 4+cDs*(72*trg*gDs**3+36*(trsgc+mc2*trg)*gDs**2+6*(mc-ms)*(ms+mc
& )*ms2*(trs+trg+trc)*gDs-9*mc2*ms2*(trsgc+mc2*trg))+18*(trsgc+mc
& 2*trg)*gDs**3-6*ms2*(ms2*(trs+trg+trc)-mc2*(trs-5*trg+trc))*gDs
& **2+18*cDs**2*gDs*(2*trg*gDs+trsgc+mc2*trg)+ms2*(3*mc2*(2*ms2*(
& trs+trg+trc)-9*trsgc)+mc2**2*(7*trs-22*trg+5*trc)-3*ms2**2*(trs
& +trg+trc))*gDs-9*mc2**2*ms2*(trsgc+ms2*trg))+2*ms2*cDg**6*(2*gD
& s+ms2)*(4*(8*trs-trg+9*trc)*gDs**2+2*(ms2*(8*trs-10*trg+27*trc)
& -9*trsgc)*gDs+ms2*(ms2*(8*trs-trg+18*trc)-9*trsgc)))/(ms2*cDg**
& 2*(2*cDg+mc2)*gDs**3*(gDs+cDg)*(2*gDs+ms2))/1.2d+1+Bmmp
Bmmp = tr3s00ft*(cDg*(8*trs*gDs**4+2*(8*trs*cDs+trsgc+5*ms2*trg)*
& gDs**3+2*(4*trs*cDs**2+2*(trsgc+ms2*trg)*cDs+ms2*(2*trsgc-5*mc2
& *trs+3*mc2*trg))*gDs**2+ms2*(2*trg*cDs**2+mc2*(-4*trs*cDs+2*trg
& *cDs-2*mc2*trs+5*ms2*trg+6*ms2*trc))*gDs+2*trsgc*cDs**2*gDs+mc2
& *ms2*(-trsgc*(cDs+ms2)-ms2*trg*(cDs+mc2)))+gDs*(-4*trg*gDs**4+c
& Ds*(-8*trg*gDs**3-4*(trsgc+mc2*trg)*gDs**2+mc2*ms2*(trsgc+mc2*t
& rg))-2*(trsgc+mc2*trg)*gDs**3+4*mc2*ms2*trg*gDs**2-2*cDs**2*gDs
& *(2*trg*gDs+trsgc+mc2*trg)+mc2*ms2*(3*trsgc+mc2*trs+3*mc2*trg)*
& gDs+mc2**2*ms2*(trsgc+ms2*trg))-cDg**2*(4*trs*gDs**3+8*trs*(cDs
& +2*ms2)*gDs**2+4*(trs*cDs**2+ms2*trsgc+ms2**2*trg)*gDs+mc2*ms2*
& (ms2*trg-2*trs*cDs))+8*ms2*trs*cDg**3*gDs)/(ms2*cDg**2*gDs)+Bm
& mp
Bmmp = B0cgsf*(cDg**4*gDs*(4*trs*gDs**4+cDs*(8*trs*gDs**3+(-2*trs
& gc+8*ms2*trs-26*ms2*trg+4*ms2*trc)*gDs**2+ms2*(-6*trsgc-5*ms2*t
& rs-2*mc2*trs-8*ms2*trg+2*mc2*trg+2*ms2*trc)*gDs+ms2**2*(-trsgc-
& 2*ms2*trg+mc2*trg+2*ms2*trc))+(ms2*(28*trs-2*trg+24*trc)-2*trsg
& c)*gDs**3+4*trs*cDs**2*gDs**2+ms2*(-14*trsgc-7*ms2*trs-12*ms2*t
& rg+4*mc2*(trc-trg)+20*ms2*trc)*gDs**2+ms2*(mc2*trsgc+2*ms2*(mc2
& *(trs+trg)-3*trsgc)+4*ms2**2*trc)*gDs-ms2**2*(2*ms2*trsgc-mc2*t
& rsgc+mc2*ms2*trg+2*ms2**2*trc))+cDg**5*(4*trs*gDs**4+4*(trs*cDs
& +4*ms2*(trs+trc))*gDs**3+ms2*(ms2*(-trs-8*trg+18*trc)-2*(2*trg*
& cDs+4*trsgc+mc2*(trs+trg)))*gDs**2+ms2*(2*(ms2*(-trs-3*trg+trc)
& -trsgc)*cDs-ms2*(4*trsgc+ms2*(-2*trs+trg-5*trc)+3*mc2*trg))*gDs
& -ms2**2*((trsgc+ms2*(trg-trc))*cDs+ms2*(trsgc+mc2*trg+ms2*trc))
& )+cDg*gDs**4*(4*(3*trg-2*trs)*gDs**4+cDs*(4*(5*trg-4*trs)*gDs**
& 3+2*(3*trsgc-2*ms2*trg+5*mc2*trg)*gDs**2+2*ms2*(mc2*(3*trs+trg+
& 2*trc)-ms2*(3*trs+2*trg+2*trc))*gDs-mc2*ms2*(trsgc-ms2*trg+2*mc
& 2*trg))+(4*trsgc-6*ms**2*trg+6*mc**2*trg)*gDs**3-2*ms2*(trsgc+3
& *ms2*trs+mc2*(-8*trs+3*trg-3*trc)+2*ms2*trg+2*ms2*trc)*gDs**2+2
& *cDs**2*gDs*(4*(trg-trs)*gDs+trsgc-ms2*trg+2*mc2*trg)+ms2*(mc2*
& (-4*trsgc+4*ms2*trs+ms2*trg)+mc2**2*(2*trs-7*trg)-2*ms2**2*trs)
& *gDs+mc2*ms2*(ms2*trsgc-mc2*(2*trsgc+ms2*trg)))+cDg**3*gDs**2*(
& 4*(trg-3*trs)*gDs**4+cDs*(4*(trg-3*trs)*gDs**3+2*(-3*trsgc+2*ms
& 2*(2*trs-9*trg+2*trc)+mc2*trg)*gDs**2-2*ms2*(5*trsgc+ms2*(3*trs
& +7*trg+trc)+mc2*trs+5*mc2*trg)*gDs+ms2*(ms2*(trsgc+3*mc2*trg)+m
& c2*trsgc+ms2**2*(trc-trg)))+2*(-2*trsgc+ms2*(12*trs-5*trg+8*trc
& )+mc2*trg)*gDs**3+ms2*(-14*trsgc+2*mc2*(7*trs+5*trc)+ms2*(-9*tr
& s-4*trg+10*trc))*gDs**2-2*(trsgc+9*ms2*trg)*cDs**2*gDs+ms2*(2*m
& c2*(trsgc+2*ms2*(trs+trg-trc))-6*ms2*trsgc+mc2**2*(2*trs-3*trg)
& +ms2**2*(-2*trs+trg+3*trc))*gDs+ms2**2*(-ms2*trsgc+3*mc2*trsgc+
& mc2*ms2*trg+mc2**2*trg-ms2**2*trc))+cDg**2*gDs**3*(4*(3*trg-5*t
& rs)*gDs**4+cDs*(16*(trg-2*trs)*gDs**3+2*(-trsgc-9*ms2*trg+4*mc2
& *trg+2*ms2*trc)*gDs**2-ms2*(6*trsgc-2*mc2*(2*trs-2*trg+trc)+7*m
& s2*trs+6*ms2*trg+4*ms2*trc)*gDs+ms2*(mc2*(trsgc+3*ms2*trg)+ms2*
& trsgc-mc2**2*trg))+2*(ms2*(4*trs-7*trg+2*trc)+3*mc2*trg)*gDs**3
& +ms2*(-8*trsgc+2*mc2*(13*trs-trg+5*trc)-9*ms2*trs)*gDs**2+2*cDs
& **2*gDs*(trg*(2*gDs-6*ms2+mc2)-6*trs*gDs-trsgc)+ms2*(-3*ms2*trs
& gc+mc2**2*(4*trs-5*trg)+2*mc2*ms2*(2*trs+trg-3*trc)+ms2**2*(3*(
& trg+trc)-2*trs))*gDs+mc2*ms2*(3*ms2*trsgc-mc2*trsgc+ms2**2*trg+
& mc2*ms2*trg))+gDs**5*(4*trg*gDs**4+cDs*(8*trg*gDs**3+4*(trsgc+m
& c2*trg)*gDs**2+2*(mc-ms)*(ms+mc)*ms2*(trs+trg+trc)*gDs-mc2*ms2*
& (trsgc+mc2*trg))+2*(trsgc+mc2*trg)*gDs**3-2*ms2*(ms2*(trs+trg+t
& rc)-mc2*(trs-trg+trc))*gDs**2+2*cDs**2*gDs*(2*trg*gDs+trsgc+mc2
& *trg)-ms2*(mc2*(3*trsgc-2*ms2*(trs+trg+trc))+ms2**2*(trs+trg+tr
& c)+3*mc2**2*trg)*gDs-mc2**2*ms2*(trsgc+ms2*trg))+ms2*cDg**6*(4*
& (trs+trc)*gDs**2-2*trsgc*gDs+2*ms2*(trs-trg+3*trc)*gDs-ms2*trsg
& c+ms2**2*(trs+2*trc)))/(ms2*cDg**2*gDs**3*(gDs+cDg)**2)/4.0d+0+
& Bmmp
Bmmp = lc*(cDg*(8*trg*gDs**4+4*(trsgc+mc2*(trg-2*trs))*gDs**3+2*c
& Ds*gDs*(4*trg*gDs**2+2*(trsgc+mc2*(trg-2*trs))*gDs-mc2*(trsgc+m
& s2*trg))-2*mc2*(trsgc+ms2*trg)*gDs**2+2*mc2*ms2*(trsgc-mc2*(trs
& +2*trg))*gDs+mc2**2*ms2*(trsgc+ms2*trg))-2*cDg**2*(8*trs*gDs**3
& +2*(4*trs*cDs+trsgc-mc2*trs+ms2*trg)*gDs**2+2*((trsgc+ms2*trg)*
& cDs-mc2*(trs*cDs-ms2*trs+ms2*trc))*gDs+mc2*ms2*(-trsgc+mc2*trs-
& ms2*trg))+mc2*gDs*(trg*(2*gDs*(gDs+cDs)*(2*gDs+mc2)-mc2**2*ms2)
& +trsgc*(2*gDs*(gDs+cDs)+mc2*ms2))+2*trs*cDg**3*(4*gDs*(gDs+cDs)
& -3*mc2*ms2))/(ms2*(2*cDg+mc2)**2*gDs)/2.0d+0+Bmmp
Bmmp = BfunX*(cDg**3*(4*(3*trs+trg)*gDs**3+cDs*(4*(3*trs+2*trg+tr
& c)*gDs**2+2*ms2*(trs+trg+2*trc)*gDs+ms2**2*trc)+ms2*(7*trs+4*tr
& g+2*trc)*gDs**2+3*ms2**2*trc*gDs+ms2**3*trc)+cDg**4*((6*trs+4*t
& rg)*gDs**2-2*ms2*(trs+trg)*gDs-ms2**2*(trs+trg))+cDg**2*gDs**2*
& (trs*(ms2*(5*gDs+3*cDs)+2*(gDs+cDs)*(5*gDs+cDs)+ms2**2)-4*ms2*(
& trg+trc)*gDs)+(trs+trg+trc)*gDs**4*(2*(gDs+cDs)+ms2)**2+2*cDg*g
& Ds**3*((trg+trc)*(gDs-ms2)+2*trs*gDs+trs*cDs)*(2*(gDs+cDs)+ms2)
& )/(cDg**2*gDs**3)/4.0d+0+Bmmp
Bmmp = B0csf*(cDg**2*(cDs*((4*mc2*trs-2*ms2*trs)*gDs**2+(2*ms2*(t
& rsgc+ms2*trc)+mc2*(ms2*(trs-3*trg+4*trc)-trsgc)+mc2**2*trs)*gDs
& +2*mc2*ms2**2*trg)+mc2*ms2*(2*trs*gDs**2+(trsgc-mc2*trg+ms2*trc
& +mc2*trc)*gDs+ms2*(ms2+mc2)*trg)-2*cDs**3*(2*(trs-trg+trc)*gDs+
& ms2*trg)-cDs**2*(2*(mc2*trc-ms2*(-trs+trg+trc))*gDs+ms2*(ms2+mc
& 2)*trg))+cDg*gDs*(cDs*(2*mc2*trs*gDs**2+(ms2*(trsgc+ms2*trc)+mc
& 2*(-2*trsgc+4*ms2*trs-5*ms2*trg+5*ms2*trc)+2*mc2**2*trs)*gDs-2*
& mc2*ms2*(trsgc+2*ms2*trg+3*mc2*trg))-mc2*ms2*(-2*trs*gDs**2+(tr
& sgc-mc2*(trs+2*trc)+ms2*(-trs+trg-2*trc))*gDs+(ms2+mc2)*(trsgc+
& mc2*trg))+cDs**2*(-2*(ms2*(trs-trg+trc)+mc2*(-trs+trg+2*trc))*g
& Ds+mc2*(trsgc-7*ms2*trg)+ms2*trsgc+mc2**2*trg)+2*cDs**3*(-2*(tr
& s-trg+2*trc)*gDs+trsgc+2*ms2*trg+3*mc2*trg)+8*trg*cDs**4)+cDg**
& 3*(cDs*(mc2*(2*trs*gDs+ms2*(-2*trs+trg+trc))+ms2*(-4*trs*gDs+tr
& sgc+ms2*trc))+mc2*ms2*(-trs*(2*gDs+mc2)+trsgc+ms2*(trg-trs))+2*
& ms2*trc*cDs**2)+gDs**2*(mc2*cDs*((-trsgc+ms2*(trs-trg+2*trc)+mc
& 2*trs)*gDs-2*ms2*(trsgc+(ms2+mc2)*trg))+cDs**2*(-2*(mc2*(-trs+t
& rg+trc)+ms2*trc)*gDs+mc2*(trsgc-4*ms2*trg)+ms2*trsgc)+mc2*ms2*(
& mc2*trg*gDs+(ms2+mc2)*trc*gDs+trsgc*(-gDs-ms2-mc2))+2*cDs**3*(-
& 2*trc*gDs+trsgc+(ms2+mc2)*trg)+4*trg*cDs**4)-2*ms2*trs*cDg**4*(
& cDs+mc2))/((cDs-mc*ms)*(cDs+mc*ms)*gDs*(gDs+cDg)**2)/2.0d+0+Bm
& mp
Bmmp = 3*mc2*tr3s002ft*((trg+trc)*((2*cDg+mc2)*gDs**2+ms2*cDg**2)
& -trs*cDg**2*(gDs+cDs))/(cDg**2*gDs)+Bmmp
Bmmp = ls*(gDs*(2*gDs+ms2)*((2*gDs+ms2)*(2*trc*gDs-trg*(4*cDs+mc2
& ))-ms2*trsgc)+cDg*(-trsgc*(2*gDs+ms2)**2+8*trc*gDs**3-4*ms2*trs
& *gDs**2+8*ms2*trc*gDs**2+4*ms2**2*trg*gDs+2*ms2**2*trc*gDs+ms2*
& *3*trg))/(gDs*(2*gDs+ms2)**2)/2.0d+0+Bmmp
Bmmp = 3*mc2*tr3s001ft*(mc2*trs*gDs-2*trs*cDg*cDs+2*ms2*(trg+trc)
& *cDg)/cDg**2+Bmmp
Bmmp=Bmmp/zmmp
return
end
| {"hexsha": "5db6888ae702887bc0181be56ca4ee35d6c0ac6e", "size": 21460, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/Stopb/Bmmp.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/Stopb/Bmmp.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/Stopb/Bmmp.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 60.9659090909, "max_line_length": 73, "alphanum_fraction": 0.5356477167, "num_tokens": 12833} |
function part1(input)
state = parse_input(input, 3)
for i = 1:6
state = simulate_cycle(state)
end
return length(state)
end
function part2(input)
state = parse_input(input, 4)
for i = 1:6
state = simulate_cycle(state)
end
return length(state)
end
function parse_input(input, dims)
state = Set{CartesianIndex{dims}}()
for (i, line) in enumerate(readlines(input))
for (j, c) in enumerate(split(line, ""))
if c == "#"
push!(state, CartesianIndex(i, j, fill(0, dims - 2)...))
end
end
end
return state
end
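# Neighbour-counting trick used by simulate_cycle below: every active cell adds 1 to
# its own position and 2 to each of its 3^N - 1 neighbours. An active cell with k
# active neighbours therefore scores 2k + 1 (odd) and an inactive cell scores 2k
# (even), so the rules "active stays with 2-3 neighbours" and "inactive activates
# with exactly 3" both collapse to the single test 5 <= score <= 7.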
function simulate_cycle(state::Set{CartesianIndex{N}}) where N
neighbors = Dict{CartesianIndex{N}, Int}()
neighborhood = CartesianIndices(ntuple(i -> -1:1, N))
for position in state
for Δ in neighborhood
if Δ == zero(CartesianIndex{N})
neighbors[position + Δ] = get(neighbors, position + Δ, 0) + 1
else
neighbors[position + Δ] = get(neighbors, position + Δ, 0) + 2
end
end
end
return Set(position
for (position, num_neighbors) in neighbors
if 5 <= num_neighbors <= 7)
end
| {"hexsha": "694a0fc89c0ff46c4b80407e70f887cae086a283", "size": 1229, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "2020/day17.jl", "max_stars_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_stars_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-12-01T16:33:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-12T21:46:33.000Z", "max_issues_repo_path": "2020/day17.jl", "max_issues_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_issues_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "2020/day17.jl", "max_forks_repo_name": "GunnarFarneback/AdventOfCode.jl", "max_forks_repo_head_hexsha": "2f60011747bfe5d27e954f914f39b4ea2f7b0722", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3111111111, "max_line_length": 77, "alphanum_fraction": 0.5728234337, "num_tokens": 324} |
dyn.load('/Library/Java/JavaVirtualMachines/jdk1.8.0_131.jdk/Contents/Home/jre/lib/server/libjvm.dylib')
setwd("/Users/mengmengjiang/all datas/voltage")
library(xlsx)
par(mfrow = c(2,2), mar = c(1.8,2.2,0.8,1), oma = c(1,1,1,1))
### 32G-18nl/min ###
k1<-read.xlsx("he-32g.xlsx", sheetName = "2kv18", header = TRUE)
k2<-read.xlsx("qd3.xlsx", sheetName = "v1", header = TRUE)
k3<-read.xlsx("qd3.xlsx", sheetName = "v2", header = TRUE)
k4<-read.xlsx("qd3.xlsx", sheetName = "v3", header = TRUE)
##
yan<-c("red","blue","black","green3")
pcc<-c(0,1,2,5)
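## yan = line colours and pcc = plotting symbols for the four voltage settings;
## the same order is reused for every panel and legend below.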
##
error.bar <- function(x, y, upper, coll,lower=upper, length=0.05,...){
if(length(x) != length(y) | length(y) !=length(lower) | length(lower) != length(upper))
stop("vectors must be same length")
arrows(x,y+upper, x, y-lower,col=coll, angle=90, code=3, length=length, ...)
}
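## error.bar() draws vertical bars from y-lower to y+upper at each x using arrows()
## with flat (90-degree) heads; e.g. error.bar(x, y, sd/2, coll="red") would add
## +/- half a standard deviation around each point (illustrative call).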
##
plot(k1$fv,k1$d_r, col=0,xlab = expression(italic(f["v"]) (Hz)),
ylab = expression(italic(d["d"]) (um)), mgp=c(1.1, 0, 0),tck=0.02,
main = "", xlim = c(0, 6000),ylim=c(0,70))
mtext("32G-18nl/min",3,line=-1,font=2,cex=0.9)
lines(k1$fv,k1$d_r,col=yan[1],pch=pcc[1],type="b",lwd=2,lty=2)
lines(k2$fv,k2$deva,col=yan[2],pch=pcc[2],type="b",lwd=2,lty=2)
lines(k3$fv,k3$deva,col=yan[3],pch=pcc[3],type="b",lwd=2,lty=2)
lines(k4$fv,k4$deva,col=yan[4],pch=pcc[4],type="b",lwd=2,lty=2)
error.bar(k1$fv,k1$d_r,k1$stdd/2,coll=yan[1])
error.bar(k2$fv,k2$deva,k2$stdd/2,coll=yan[2])
error.bar(k3$fv,k3$deva,k3$stdd/2,coll=yan[3])
error.bar(k4$fv,k4$deva,k4$stdd/2,coll=yan[4])
leg<-c("V0+0:2kv+0Kv","Va+Vb:2kv-1.7kv","Va+Vb:2kv-1.9kv","Va+Vb:2kv-1.95kv")
legend("topright",legend=leg,col=yan,pch=pcc,lwd=1.5,lty=2,inset=.02,bty="n",cex=0.8)
### 32G-180nl/min ###
ka<-read.xlsx("he-32g.xlsx", sheetName = "2kv180", header = TRUE)
kb<-read.xlsx("qd4.xlsx", sheetName = "v1", header = TRUE)
kc<-read.xlsx("qd4.xlsx", sheetName = "v2", header = TRUE)
kd<-read.xlsx("qd4.xlsx", sheetName = "v3", header = TRUE)
plot(ka$fv,ka$d_r, col=0,xlab = expression(italic(f["v"]) (Hz)),
ylab = expression(italic(d["d"]) (um)), mgp=c(1.1, 0, 0),tck=0.02,
main = "", xlim = c(0,4000),ylim=c(0,70))
mtext("32G-180nl/min",3,line=-1,font=2,cex=0.9)
lines(ka$fv,ka$d_r,col=yan[1],pch=pcc[1],type="b",lwd=2,lty=2)
lines(kb$fv,kb$deva,col=yan[2],pch=pcc[2],type="b",lwd=2,lty=2)
lines(kc$fv,kc$deva,col=yan[3],pch=pcc[3],type="b",lwd=2,lty=2)
lines(kd$fv,kd$deva,col=yan[4],pch=pcc[4],type="b",lwd=2,lty=2)
error.bar(ka$fv,ka$d_r,ka$stdd/2,coll=yan[1])
error.bar(kb$fv,kb$deva,kb$stdd/2,coll=yan[2])
error.bar(kc$fv,kc$deva,kc$stdd/2,coll=yan[3])
error.bar(kd$fv,kd$deva,kd$stdd/2,coll=yan[4])
leg<-c("V0+0:2kv+0Kv","Va+Vb:2kv-1.7kv","Va+Vb:2kv-1.9kv","Va+Vb:2kv-1.95kv")
legend("topright",legend=leg,col=yan,pch=pcc,lwd=1.5,lty=2,inset=.02,bty="n",cex=0.8)
# 32G 0+2kv 18nl/min
x<-c(2,2.5,3,3.5,4)
y0<-c(33/60,33/60,33.4/60,34/60,34/60)
y1<-c(34/60,35/60,34.5/60,34.5/60,35/60) # 32G,0+2kv,18nl/min
y2<-c(39/80,40/80,42/80,41/80,41/80) # 32G,V1,18nl/min
y3<-c(41/78,40/80,41/80,43/79,38/79) # 32G,V2,18nl/min
y4<-c(39/79,39/79,40/79,40/81,40/81) # 32G,V3,18nl/min
y5<-c(40/79,38/79,43/82,44/80,40/81) # 32G,V1,180nl/min
y6<-c(35/80,34/80,35/80,35/80,36/80) # 32G,V2,180nl/min
y7<-c(36/81,36/81,36/81,36/81,37/81) # 32G,V3,180nl/min
plot(x,y1, col=0,xlab = expression(italic(f["v"]) (KHz)),
ylab = expression(italic(h["m"]/D)), mgp=c(1.1, 0, 0),tck=0.02,
main = "", xlim = c(2,4),ylim=c(0.2,0.8))
mtext("18nl/min-hm",3,line=-1,font=2,cex=0.9)
lines(x,y0,col=yan[1],pch=pcc[1],type="b",lwd=2,lty=2)
lines(x,y2,col=yan[2],pch=pcc[2],type="b",lwd=2,lty=2)
lines(x,y3,col=yan[3],pch=pcc[3],type="b",lwd=2,lty=2)
lines(x,y4,col=yan[4],pch=pcc[4],type="b",lwd=2,lty=2)
leg<-c("2+0kv-18nl/min","2kv+1.7kv-18nl/min",
"2kv-1.8kv-18nl/min","2kv-1.9kv-18nl/min")
legend("bottomright",legend=leg,col=yan,pch=pcc,lwd=1.5,lty=2,inset=.01,cex=0.8,bty="n")
plot(x,y1, col=0,xlab = expression(italic(f["v"]) (KHz)),
ylab = expression(italic(h["m"]/D)), mgp=c(1.1, 0, 0),tck=0.02,
main = "", xlim = c(2,4),ylim=c(0.2,0.8))
mtext("180nl/min-hm",3,line=-1,font=2,cex=0.9)
lines(x,y1,col=yan[1],pch=pcc[1],type="b",lwd=2,lty=2)
lines(x,y5,col=yan[2],pch=pcc[2],type="b",lwd=2,lty=2)
lines(x,y6,col=yan[3],pch=pcc[3],type="b",lwd=2,lty=2)
lines(x,y7,col=yan[4],pch=pcc[4],type="b",lwd=2,lty=2)
leg2<-c("2+0kv-18nl/min","2kv+1.7kv-18nl/min",
"2kv-1.8kv-18nl/min","2kv-1.9kv-18nl/min")
legend("bottomright",legend=leg,col=yan,pch=pcc,lwd=1.5,lty=2,inset=.01,cex=0.8,bty="n")
| {"hexsha": "9ff959e9ccaca46910635248d0f1abd26aa6b7ba", "size": 4727, "ext": "r", "lang": "R", "max_stars_repo_path": "thesis/chap5/chap5-fig5-21.r", "max_stars_repo_name": "shuaimeng/r", "max_stars_repo_head_hexsha": "94fa0cce89c89a847b95000f07e64a0feac1eabe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thesis/chap5/chap5-fig5-21.r", "max_issues_repo_name": "shuaimeng/r", "max_issues_repo_head_hexsha": "94fa0cce89c89a847b95000f07e64a0feac1eabe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thesis/chap5/chap5-fig5-21.r", "max_forks_repo_name": "shuaimeng/r", "max_forks_repo_head_hexsha": "94fa0cce89c89a847b95000f07e64a0feac1eabe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.2204724409, "max_line_length": 104, "alphanum_fraction": 0.6016500952, "num_tokens": 2229} |
#include <boost/fusion/container/map/map_iterator.hpp>
| {"hexsha": "1ddbd3ba01c4aff7dbd5fa8fb732c0aef0440247", "size": 55, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/boost_fusion_container_map_map_iterator.hpp", "max_stars_repo_name": "miathedev/BoostForArduino", "max_stars_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": 10.0, "max_stars_repo_stars_event_min_datetime": "2018-03-17T00:58:42.000Z", "max_stars_repo_stars_event_max_datetime": "2021-07-06T02:48:49.000Z", "max_issues_repo_path": "src/boost_fusion_container_map_map_iterator.hpp", "max_issues_repo_name": "miathedev/BoostForArduino", "max_issues_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": 2.0, "max_issues_repo_issues_event_min_datetime": "2021-03-26T15:17:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-20T23:55:08.000Z", "max_forks_repo_path": "src/boost_fusion_container_map_map_iterator.hpp", "max_forks_repo_name": "miathedev/BoostForArduino", "max_forks_repo_head_hexsha": "919621dcd0c157094bed4df752b583ba6ea6409e", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": 4.0, "max_forks_repo_forks_event_min_datetime": "2019-05-28T21:06:37.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T03:06:52.000Z", "avg_line_length": 27.5, "max_line_length": 54, "alphanum_fraction": 0.8181818182, "num_tokens": 11} |
function [varargout]=gpatch(varargin)
% function [h]=gpatch(F,V,C,CE,A,L)
% ------------------------------------------------------------------------
% This function is a short-hand version of the |patch| command. The inputs
% for |gpatch| are the faces (F), the vertices (V), the color description
% (C), the edge color description CE, the transparency (A), and the edge
% width (L).
% The color data descriptions C (or equivalently CE for edges) can be:
% 1) A string such as 'g' for green
% 2) A triplet of RGB values e.g. [1 0 0] is red
% 3) A nx1 or a mx1 array of colormapped colors (where n=size(F,1) or
% m=size(V,1))
% 4) (similar to 3) A nx3 or a mx3 RGB color value array for the faces or
% vertices respectively.
%
%
% Kevin Mattheus Moerman
% [email protected]
%
% 2017
% 2018/02/07 Added support for colormapped edges
%------------------------------------------------------------------------
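%
% Example (illustrative only; uses standard MATLAB demo data rather than GIBBON data):
%   [X,Y,Z]=peaks(25);
%   [F,V]=surf2patch(X,Y,Z,'triangles'); %triangulated faces and vertices
%   figure; gpatch(F,V,'gw','k',1); axis equal; view(3); camlight headlight;
%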
switch nargin
case 1
error('Not enough input arguments, provide at least faces and vertices');
case 2
F=varargin{1};
V=varargin{2};
C='g';
CE='k';
A=1;
L=[];
case 3
F=varargin{1};
V=varargin{2};
C=varargin{3};
CE='k';
A=1;
L=[];
case 4
F=varargin{1};
V=varargin{2};
C=varargin{3};
CE=varargin{4};
A=1;
L=[];
case 5
F=varargin{1};
V=varargin{2};
C=varargin{3};
CE=varargin{4};
A=varargin{5};
L=[];
case 6
F=varargin{1};
V=varargin{2};
C=varargin{3};
CE=varargin{4};
A=varargin{5};
L=varargin{6};
otherwise
error('Wrong number of input arguments');
end
if isempty(C)
C='g';
end
if isempty(CE)
    CE='k';
end
if isa(F,'cell') %Assume all entries are cells defining multiple patch data sets
for q=1:1:numel(F)
f=F{q};
if isa(V,'cell')
v=V{q};
else
v=V;
end
if isa(C,'cell')
c=C{q};
else
c=C;
end
if isa(CE,'cell')
ce=CE{q};
else
ce=CE;
end
if isa(A,'cell')
a=A{q};
else
a=A;
end
hp(q)=plotPatch(f,v,c,ce,a,L);
end
else
hp=plotPatch(F,V,C,CE,A,L);
end
if nargout==1
varargout{1}=hp;
end
end
%%
function hp=plotPatch(F,V,C,CE,A,L)
% hf=gcf;
% if isempty(hf.Children)
% gca;
% view(3);
% end
argInPatch.Faces=F;
argInPatch.Vertices=V;
argInPatch.EdgeColor=CE;
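%Two-letter codes such as 'kw','rw','gw','bw','yw','cw','mw' below are treated as
%whitened (pastel) versions of the single-letter MATLAB colors, e.g. 'rw' -> [1 0.5 0.5]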
if ischar(C) %Plain single color
argInPatch.FaceColor=C;
if strcmp(C,'kw')
argInPatch.FaceColor=grayColor(0.5);
end
if strcmp(C,'rw')
argInPatch.FaceColor=[1 0.5 0.5];
end
if strcmp(C,'gw')
argInPatch.FaceColor=[0.5 1 0.5];
end
if strcmp(C,'bw')
argInPatch.FaceColor=[0.5 0.5 1];
end
if strcmp(C,'yw')
argInPatch.FaceColor=[1 1 0.5];
end
if strcmp(C,'cw')
argInPatch.FaceColor=[0.5 1 1];
end
if strcmp(C,'mw')
argInPatch.FaceColor=[1 0.5 1];
end
elseif size(C,2)==1
argInPatch.FaceColor='flat';
argInPatch.CData=double(C);
elseif size(C,2)==3 && size(C,1)==1 %Assume single RGB level
argInPatch.FaceColor=double(C);
elseif size(C,2)==3 && size(C,1)>1 %Assume RGB array
argInPatch.FaceColor='flat';
argInPatch.FaceVertexCData=double(C);
else
error('Invalid face-vertex color data input');
end
if ischar(CE) %Plain single color
argInPatch.EdgeColor=CE;
if strcmp(CE,'kw')
argInPatch.EdgeColor=grayColor(0.5);
end
if strcmp(CE,'rw')
argInPatch.EdgeColor=[1 0.5 0.5];
end
if strcmp(CE,'gw')
argInPatch.EdgeColor=[0.5 1 0.5];
end
if strcmp(CE,'bw')
argInPatch.EdgeColor=[0.5 0.5 1];
end
if strcmp(CE,'yw')
argInPatch.EdgeColor=[1 1 0.5];
end
if strcmp(CE,'cw')
argInPatch.EdgeColor=[0.5 1 1];
end
if strcmp(CE,'mw')
argInPatch.EdgeColor=[1 0.5 1];
end
elseif size(CE,2)==1
if size(CE,1)>1
if size(CE,1)==size(F,1)
[CE]=faceToVertexMeasure(F,V,CE);
argInPatch.EdgeColor='flat';
argInPatch.CData=double(CE);
end
if size(CE,1)==size(V,1)
argInPatch.EdgeColor='flat';
argInPatch.CData=double(CE);
end
else
argInPatch.EdgeColor='flat';
argInPatch.CData=double(CE)*ones(size(V,1),1);
end
elseif size(CE,2)==3 && size(CE,1)==1 %Assume single RGB level
argInPatch.EdgeColor=double(CE);
elseif size(CE,2)==3 && size(CE,1)>1 %Assume RGB array
argInPatch.EdgeColor='flat';
argInPatch.FaceVertexCData=double(CE);
else
error('Invalid edge color data input');
end
if numel(A)==1 %Plain single alpha
argInPatch.FaceAlpha=A;
elseif size(A,2)==1 %Alpha mapping
argInPatch.FaceAlpha='flat';
argInPatch.FaceVertexAlphaData=A;
else
error('Invalid alpha data input');
end
if ~isempty(L)
argInPatch.LineWidth=L;
end
hp=patch(argInPatch);
end
%%
% _*GIBBON footer text*_
%
% License: <https://github.com/gibbonCode/GIBBON/blob/master/LICENSE>
%
% GIBBON: The Geometry and Image-based Bioengineering add-On. A toolbox for
% image segmentation, image-based modeling, meshing, and finite element
% analysis.
%
% Copyright (C) 2018 Kevin Mattheus Moerman
%
% This program is free software: you can redistribute it and/or modify
% it under the terms of the GNU General Public License as published by
% the Free Software Foundation, either version 3 of the License, or
% (at your option) any later version.
%
% This program is distributed in the hope that it will be useful,
% but WITHOUT ANY WARRANTY; without even the implied warranty of
% MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
% GNU General Public License for more details.
%
% You should have received a copy of the GNU General Public License
% along with this program. If not, see <http://www.gnu.org/licenses/>.
| {"author": "MultiDIC", "repo": "MultiDIC", "sha": "d363c3ea74673e58df275d4a4c8e528ef5472acb", "save_path": "github-repos/MATLAB/MultiDIC-MultiDIC", "path": "github-repos/MATLAB/MultiDIC-MultiDIC/MultiDIC-d363c3ea74673e58df275d4a4c8e528ef5472acb/lib_ext/GIBBON/lib/gpatch.m"} |
#!/usr/bin/python
##############################################
###Python template
###Author: Elizabeth Lee
###Date: 6/9/13
###Function:
#### 1) create scatter of OR by zipcode vs. urban metro RUCC avg 2013
###Import data: zipcode_bysseas_cl.csv
###Command Line: python
##############################################
### notes ###
### packages ###
import matplotlib
import csv
import numpy as np
import matplotlib.pyplot as plt
from pylab import *
## local packages ##
import ORgenerator_v060713 as od
### data structures ###
child1, adult1, zip3_sdi, snum_sdi = [],[],[],[] # attack rates for children and adults for total by zipcode, zip3s from sdi data, season number in sdi data
y1 = [] # odds ratios for total cases by zipcode
zipdict, rucc_bin = {},[] # dictionary of zip3 and rural-urban categorization, list of rucc 1-3 bins that correspond with order of zip3s in sdi data
cs1, cs2, cs3, cs4, cs5, cs6, cs7, cs8, cs9, cs10 = [],[],[],[],[],[],[],[],[],[] # childlist for seasons 1-10
as1, as2, as3, as4, as5, as6, as7, as8, as9, as10 = [],[],[],[],[],[],[],[],[],[] # adultlist for seasons 1-10
ys1, ys2, ys3, ys4, ys5, ys6, ys7, ys8, ys9, ys10 = [],[],[],[],[],[],[],[],[],[] # OR for seasons 1-10
rbs1, rbs2, rbs3, rbs4, rbs5, rbs6, rbs7, rbs8, rbs9, rbs10 = [],[],[],[],[],[],[],[],[],[] # rucc_mn_bin for seasons 1-10
z3s1, z3s2, z3s3, z3s4, z3s5, z3s6, z3s7, z3s8, z3s9, z3s10 = [],[],[],[],[],[],[],[],[],[] # zip3_sdi for seasons 1-10
sns1, sns2, sns3, sns4, sns5, sns6, sns7, sns8, sns9, sns10 = [],[],[],[],[],[],[],[],[],[] # season number from sdi data for dataset broken into seasons 1-10
### parameters ###
### functions ###
# create a dictionary of zip3, rural-urban categorization as key, value
def createzipdict(csvreadfile, dictname):
ct=0
for row in csvreadfile:
if ct==0:
ct+=1
continue
else:
zipdict[str(row[0])] = int(row[3])
### import data ###
zORin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_bysseas_cl2.csv','r') # use to calculate OR by zip3
zOR=csv.reader(zORin, delimiter=',')
zOR1in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s1.csv','r') # use to calculate OR by zip3
zOR1=csv.reader(zOR1in, delimiter=',')
zOR2in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s2.csv','r') # use to calculate OR by zip3
zOR2=csv.reader(zOR2in, delimiter=',')
zOR3in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s3.csv','r') # use to calculate OR by zip3
zOR3=csv.reader(zOR3in, delimiter=',')
zOR4in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s4.csv','r') # use to calculate OR by zip3
zOR4=csv.reader(zOR4in, delimiter=',')
zOR5in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s5.csv','r') # use to calculate OR by zip3
zOR5=csv.reader(zOR5in, delimiter=',')
zOR6in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s6.csv','r') # use to calculate OR by zip3
zOR6=csv.reader(zOR6in, delimiter=',')
zOR7in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s7.csv','r') # use to calculate OR by zip3
zOR7=csv.reader(zOR7in, delimiter=',')
zOR8in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s8.csv','r') # use to calculate OR by zip3
zOR8=csv.reader(zOR8in, delimiter=',')
zOR9in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s9.csv','r') # use to calculate OR by zip3
zOR9=csv.reader(zOR9in, delimiter=',')
zOR10in=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/SDI_Data/R_export/zipcode_cl2_s10.csv','r') # use to calculate OR by zip3
zOR10=csv.reader(zOR10in, delimiter=',')
RUCCavgin=open('/home/elee/Dropbox/Elizabeth_Bansal_Lab/Rural_Urban/R_export/zip3_RUCC2013avg_crosswalk.csv','r') # categorization of urban/rural by zip3
RUCCavg=csv.reader(RUCCavgin, delimiter=',')
### program ###
### analyze all zip3-season data together to see if there are patterns
createzipdict(RUCCavg, zipdict)
od.importer_zip3(zOR, adult1, child1, 3, 4, zip3_sdi, 2, snum_sdi, zipdict, rucc_bin)
print "rucc_binlen:", len(rucc_bin)
print "child1len:", len(child1), "adult1len:", len(adult1)
# adult1 index 101 and 104, adultlist == 0
# child1 index 2162, childlist == 0
od.ORgen(y1, child1, adult1)
print "y1len:", len(y1)
# OR vs. urban rural code (all seasons together)
rulab = ['populous urban metro area', 'small metro area', 'rural non-metro area']
xaxjitter = [x + np.random.uniform(-0.4, 0.4, 1) for x in rucc_bin]
print "length x-axis jitter:",len(xaxjitter)
plt.scatter(xaxjitter, y1, marker='o', color = 'black', label= "zipcode prefix")
# plt.scatter(domsubtypeplot, y3a, marker='o', color = 'red', label= "severe cases")
# plt.scatter(domsubtypeplot, y3b, marker='o', color = 'green', label= "milder cases")
# for num, subtype, OR in zip(seasonnum, rucc_bin, y1):
# plt.annotate(num, xy = (subtype, OR), xytext = (10,0), textcoords = 'offset points')
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
# urban areas tend to have larger ranges of ORs
# number of zip3s: 396 populous urban metro - 322 smaller urban metro - 167 rural
#### analyze ORs by season
od.importer_zip3(zOR1, as1, cs1, 3, 4, z3s1, 2, sns1, zipdict, rbs1)
print "rucc_binlen:", len(rbs1)
print "childlen:", len(cs1), "adultlen:", len(as1)
# adult1 index 101 and 104, adultlist == 0
# child1 index 2162, childlist == 0
od.ORgen(ys1, cs1, as1)
print "ylen:", len(ys1)
od.importer_zip3(zOR2, as2, cs2, 3, 4, z3s2, 2, sns2, zipdict, rbs2)
od.importer_zip3(zOR3, as3, cs3, 3, 4, z3s3, 2, sns3, zipdict, rbs3)
od.importer_zip3(zOR4, as4, cs4, 3, 4, z3s4, 2, sns4, zipdict, rbs4)
od.importer_zip3(zOR5, as5, cs5, 3, 4, z3s5, 2, sns5, zipdict, rbs5)
od.importer_zip3(zOR6, as6, cs6, 3, 4, z3s6, 2, sns6, zipdict, rbs6)
od.importer_zip3(zOR7, as7, cs7, 3, 4, z3s7, 2, sns7, zipdict, rbs7)
od.importer_zip3(zOR8, as8, cs8, 3, 4, z3s8, 2, sns8, zipdict, rbs8)
od.importer_zip3(zOR9, as9, cs9, 3, 4, z3s9, 2, sns9, zipdict, rbs9)
od.importer_zip3(zOR10, as10, cs10, 3, 4, z3s10, 2, sns10, zipdict, rbs10)
od.ORgen(ys2, cs2, as2)
od.ORgen(ys3, cs3, as3)
od.ORgen(ys4, cs4, as4)
od.ORgen(ys5, cs5, as5)
od.ORgen(ys6, cs6, as6)
od.ORgen(ys7, cs7, as7)
od.ORgen(ys8, cs8, as8)
od.ORgen(ys9, cs9, as9)
od.ORgen(ys10, cs10, as10)
# OR vs. urban rural code by season
rulab = ['populous urban metro area', 'small metro area', 'rural non-metro area']
xaxjs1 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs1]
print "ys1:",len(ys1),"length x-axis jitter:",len(xaxjs1)
xaxjs2 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs2]
print "ys2:",len(ys2),"length x-axis jitter:",len(xaxjs2)
xaxjs3 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs3]
print "ys3:",len(ys3),"length x-axis jitter:",len(xaxjs3)
xaxjs4 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs4]
print "ys4",len(ys4),"length x-axis jitter:",len(xaxjs4)
xaxjs5 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs5]
print "ys5:",len(ys5),"length x-axis jitter:",len(xaxjs5)
xaxjs6 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs6]
print "ys6:",len(ys6),"length x-axis jitter:",len(xaxjs6)
xaxjs7 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs7]
print "ys7:",len(ys7),"length x-axis jitter:",len(xaxjs7)
xaxjs8 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs8]
print "ys8:",len(ys8),"length x-axis jitter:",len(xaxjs8)
xaxjs9 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs9]
print "ys9:",len(ys9),"length x-axis jitter:",len(xaxjs9)
xaxjs10 = [x + np.random.uniform(-0.4, 0.4, 1) for x in rbs10]
print "ys10:",len(ys10),"length x-axis jitter:",len(xaxjs10)
plt.scatter(xaxjs1, ys1, marker='o', color = 'grey', label= "Season 1")
plt.scatter(xaxjs2, ys2, marker='o', color = 'black', label= "Season 2")
plt.scatter(xaxjs3, ys3, marker='o', color = 'red', label= "Season 3")
plt.scatter(xaxjs4, ys4, marker='o', color = 'orange', label= "Season 4")
plt.scatter(xaxjs5, ys5, marker='o', color = 'gold', label= "Season 5")
plt.scatter(xaxjs6, ys6, marker='o', color = 'green', label= "Season 6")
plt.scatter(xaxjs7, ys7, marker='o', color = 'blue', label= "Season 7")
plt.scatter(xaxjs8, ys8, marker='o', color = 'cyan', label= "Season 8")
plt.scatter(xaxjs9, ys9, marker='o', color = 'darkviolet', label= "Season 9")
plt.scatter(xaxjs10, ys10, marker='o', color = 'hotpink', label= "Season 10")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
# OR vs. urban rural code each season
plt.scatter(xaxjs1, ys1, marker='o', color = 'grey', label= "Season 1")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs2, ys2, marker='o', color = 'black', label= "Season 2")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs3, ys3, marker='o', color = 'red', label= "Season 3")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs4, ys4, marker='o', color = 'orange', label= "Season 4")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs5, ys5, marker='o', color = 'gold', label= "Season 5")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs6, ys6, marker='o', color = 'green', label= "Season 6")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs7, ys7, marker='o', color = 'blue', label= "Season 7")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs8, ys8, marker='o', color = 'cyan', label= "Season 8")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs9, ys9, marker='o', color = 'darkviolet', label= "Season 9")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
plt.scatter(xaxjs10, ys10, marker='o', color = 'hotpink', label= "Season 10")
xlab=[1,2,3]
plt.ylabel('Odds ratio of attack rate, child:adult (zip3 popn normalized)')
plt.xlabel('Urban metro categorization')
plt.legend(loc="upper right")
plt.xticks(xlab, rulab)
plt.show()
| {"hexsha": "94eaf2f6f272544f781038dd60857a1ff3d600a9", "size": 11462, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/OR_urbanmetro_v6-7-13.py", "max_stars_repo_name": "eclee25/flu-SDI-exploratory-age", "max_stars_repo_head_hexsha": "2f5a4d97b84d2116e179e85fe334edf4556aa946", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-03-29T23:02:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-10T12:01:50.000Z", "max_issues_repo_path": "scripts/OR_urbanmetro_v6-7-13.py", "max_issues_repo_name": "eclee25/flu-SDI-exploratory-age", "max_issues_repo_head_hexsha": "2f5a4d97b84d2116e179e85fe334edf4556aa946", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/OR_urbanmetro_v6-7-13.py", "max_forks_repo_name": "eclee25/flu-SDI-exploratory-age", "max_forks_repo_head_hexsha": "2f5a4d97b84d2116e179e85fe334edf4556aa946", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9157088123, "max_line_length": 158, "alphanum_fraction": 0.6932472518, "include": true, "reason": "import numpy", "num_tokens": 4108} |
from __future__ import print_function
import numpy as np
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.autograd import Variable
class QAlexNet(nn.Module):
def __init__(self, num_classes=10):
super(QAlexNet, self).__init__()
self.conv1 = nn.Conv2d(
3, 64, kernel_size=5, stride=1, padding=2) # 32x32x3 -> 32x32x64
# self.pool1=nn.MaxPool2d(kernel_size=3, stride=2, padding =1 )# 32x32x64
# -> 16x16x64
self.bn1 = nn.BatchNorm2d(64)
self.conv2 = nn.Conv2d(
64, 64, kernel_size=5, stride=1, padding=2) # 16x16x64 -> 16x16x64
# self.pool2=nn.MaxPool2d(kernel_size=3, stride=2, padding = 1)# 16x16x64
# -> 8x8x64
self.bn2 = nn.BatchNorm2d(64)
self.fc1 = nn.Linear(64 * 8 * 8, 384)
self.fc2 = nn.Linear(384, 192)
self.fc3 = nn.Linear(192, num_classes)
def squeeze_layers(self, sl=None):
for k in self._modules.keys():
if k in sl:
for param in self._modules[k].parameters():
param.requires_grad = False
print(param.requires_grad)
def back(self):
for k in self._modules.keys():
for param in self._modules[k].parameters():
param.requires_grad = True
def forward(self, x):
x = F.max_pool2d(self.bn1(F.relu(self.conv1(x))), 3, 2, 1)
x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))), 3, 2, 1)
x = x.view(x.size(0), -1)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
ALPHA = 1
class ANet(nn.Module):
def __init__(self):
super(ANet, self).__init__()
self.conv1 = nn.Conv2d(3, 64 * ALPHA, kernel_size=3)
self.bn1 = nn.BatchNorm2d(64 * ALPHA)
self.conv2 = nn.Conv2d(64 * ALPHA, 64 * ALPHA, kernel_size=3)
self.bn2 = nn.BatchNorm2d(64 * ALPHA)
self.conv3 = nn.Conv2d(64 * ALPHA, 128 * ALPHA, kernel_size=3)
self.bn3 = nn.BatchNorm2d(128 * ALPHA)
self.conv4 = nn.Conv2d(128 * ALPHA, 128 * ALPHA, kernel_size=3)
self.bn4 = nn.BatchNorm2d(128 * ALPHA)
self.conv_drop = nn.Dropout2d()
self.fc1 = nn.Linear(128 * 5 * 5 * ALPHA, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 10)
self.drop = nn.Dropout()
def forward(self, x):
x = self.bn1(F.relu(self.conv1(x)))
x = F.max_pool2d(self.bn2(F.relu(self.conv2(x))), 2)
x = self.bn3(F.relu(self.conv3(x)))
x = F.max_pool2d(self.bn4(F.relu(self.conv4(x))), 2)
#x = self.conv_drop(x)
x = x.view(-1, 128 * ALPHA * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
class AlexNet(nn.Module):
def __init__(self, num_classes=10):
super(AlexNet, self).__init__()
self.features = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=11, stride=4, padding=5),
# 32x32x3 -> 8x8x64
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # 8x8x64 -> 4x4x64
nn.Conv2d(64, 192, kernel_size=5, padding=2), # 4x4x64 -> 4x4x192
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # 4x4x64 -> 2x2x192
nn.Conv2d(192, 384, kernel_size=3, padding=1),
# 2x2x192 -> 2x2x384
nn.ReLU(inplace=True),
nn.Conv2d(384, 256, kernel_size=3, padding=1),
# 2x2x384 -> 2x2x256
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, padding=1),
# 2x2x256 -> 2x2x256
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=2, stride=2), # 2x2x256 -> 1x1x256
)
self.classifier = nn.Linear(256, num_classes)
def forward(self, x):
x = self.features(x)
x = x.view(x.size(0), -1)
x = self.classifier(x)
return x
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=3)
self.conv2 = nn.Conv2d(64, 64, kernel_size=3)
self.conv3 = nn.Conv2d(64, 128, kernel_size=3)
self.conv4 = nn.Conv2d(128, 128, kernel_size=3)
self.conv_drop = nn.Dropout2d()
self.fc1 = nn.Linear(128 * 5 * 5, 256)
self.fc2 = nn.Linear(256, 256)
self.fc3 = nn.Linear(256, 10)
self.drop = nn.Dropout()
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(F.relu(self.conv2(x)), 2)
x = F.relu(self.conv3(x))
x = F.max_pool2d(F.relu(self.conv4(x)), 2)
x = self.conv_drop(x)
x = x.view(-1, 128 * 5 * 5)
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.drop(x)
x = self.fc3(x)
return x
class FcNet(nn.Module):
def __init__(self, num_classes=10):
super(FcNet, self).__init__()
self.fc1 = nn.Linear(32 * 32 * 3, 2048)
        self.bn1 = nn.BatchNorm1d(2048)  # BatchNorm1d: input here is a flat (N, 2048) tensor
self.fc2 = nn.Linear(2048, 256)
        self.bn2 = nn.BatchNorm1d(256)
self.fc3 = nn.Linear(256, 10)
self.drop = nn.Dropout()
def forward(self, x):
x = x.view(-1, 32 * 32 * 3)
x = F.relu(self.fc1(x))
x = self.bn1(x)
x = F.relu(self.fc2(x))
x = self.bn2(x)
x = self.drop(x)
x = self.fc3(x)
return x
class LeNet(nn.Module):
def __init__(self):
super(LeNet, self).__init__()
self.conv1 = nn.Conv2d(1, 20, 5, 1)
self.conv2 = nn.Conv2d(20, 50, 5, 1)
self.fc1 = nn.Linear(4 * 4 * 50, 500)
self.fc2 = nn.Linear(500, 10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.max_pool2d(x, 2, 2)
x = F.relu(self.conv2(x))
x = F.max_pool2d(x, 2, 2)
x = x.view(-1, 4 * 4 * 50)
x = F.relu(self.fc1(x))
x = self.fc2(x)
return x
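
# --- Hedged usage sketch (not part of the original training code) ---
# Quick forward-pass shape check for the models above.  Assumes CIFAR-10-sized
# inputs (3x32x32) for the CIFAR models and MNIST-sized inputs (1x28x28) for LeNet.
if __name__ == "__main__":
    checks = [(QAlexNet(), (2, 3, 32, 32)),
              (ANet(), (2, 3, 32, 32)),
              (AlexNet(), (2, 3, 32, 32)),
              (FcNet(), (2, 3, 32, 32)),
              (LeNet(), (2, 1, 28, 28))]
    for model, shape in checks:
        out = model(torch.randn(*shape))
        print(type(model).__name__, tuple(out.shape))  # expect (2, 10) for each model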
| {"hexsha": "f05f903315b385e6ea385806809eca7f9ec9bd60", "size": 6093, "ext": "py", "lang": "Python", "max_stars_repo_path": "models/model.py", "max_stars_repo_name": "LinjianMa/neuralODE-282", "max_stars_repo_head_hexsha": "28d7b520beeeeb773e651110d1faedda6cb835e7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "models/model.py", "max_issues_repo_name": "LinjianMa/neuralODE-282", "max_issues_repo_head_hexsha": "28d7b520beeeeb773e651110d1faedda6cb835e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "models/model.py", "max_forks_repo_name": "LinjianMa/neuralODE-282", "max_forks_repo_head_hexsha": "28d7b520beeeeb773e651110d1faedda6cb835e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.85, "max_line_length": 81, "alphanum_fraction": 0.5511242409, "include": true, "reason": "import numpy", "num_tokens": 1958} |
function res = adjD(y)
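% Adjoint of the forward finite-difference (gradient) operator D, as commonly
% used in total-variation style penalties: y is an Ny x Nx x 2 array holding
% the x- and y-difference images, and the result is
% res = Dx'*y(:,:,1) + Dy'*y(:,:,2), i.e. a (negative) divergence with
% one-sided handling at the image borders.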
res = zeros(size(y,1),size(y,2));
%y1 = ones(imsize)*y(1)/sqrt(prod(imsize));
%yx = (reshape(y(2:prod(imsize)+1), imsize(1), imsize(2)));
%yy = (reshape(y(prod(imsize)+2:end), imsize(1), imsize(2)));
res = adjDx(y(:,:,1)) + adjDy(y(:,:,2));
return;
function res = adjDy(x)
res = x(:,[1,1:end-1]) - x;
res(:,1) = -x(:,1);
res(:,end) = x(:,end-1);
function res = adjDx(x)
res = x([1,1:end-1],:) - x;
res(1,:) = -x(1,:);
res(end,:) = x(end-1,:);
| {"author": "thomaskuestner", "repo": "CS_MoCo_LAB", "sha": "a26e8e483624b2e4ee669e7a069ba9c74d2d2e4b", "save_path": "github-repos/MATLAB/thomaskuestner-CS_MoCo_LAB", "path": "github-repos/MATLAB/thomaskuestner-CS_MoCo_LAB/CS_MoCo_LAB-a26e8e483624b2e4ee669e7a069ba9c74d2d2e4b/reconstruction/matlab/CS_LAB_matlab/@TVOP/private/adjD.m"} |
import torch
import numpy as np
from torch import nn
from torch.nn import functional as F
from torch import optim
from metalearning.learner import Learner
from utils import get_performance
from copy import deepcopy
class Meta(nn.Module):
def __init__(self, args):
super(Meta, self).__init__()
self.update_lr = args.update_lr
self.meta_lr = args.meta_lr
self.n_way = args.n_way
self.k_spt = args.k_spt
self.k_qry = args.k_qry
self.task_num = args.batch_num
self.update_step = args.update_step
self.update_step_test = args.update_step_test
self.config = [
('linear', [args.hidden, 200]),
('linear', [args.n_way, args.hidden])
]
self.net = Learner(self.config)
self.meta_optim = optim.Adam(self.net.parameters(), lr=self.meta_lr)
def clip_grad_by_norm_(self, grad, max_norm):
total_norm = 0
counter = 0
for g in grad:
param_norm = g.data.norm(2)
total_norm += param_norm.item() ** 2
counter += 1
total_norm = total_norm ** (1. / 2)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
for g in grad:
g.data.mul_(clip_coef)
return total_norm/counter
def forward_kd(self, x_spt, y_spt, x_qry, y_qry,teacher_score,kd):
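        """
        MAML-style inner loop: adapt the network on the support set
        (x_spt, y_spt) for self.update_step gradient steps and evaluate the
        adapted weights on the query set (x_qry, y_qry).  When kd == 1 the
        inner loss is a distillation loss against teacher_score; when kd == 0
        plain cross-entropy is used.  Returns per-step query accuracy and F1,
        averaged over tasks.
        """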
task_num = self.task_num
querysz = y_qry[0].shape[0]
# querysz = self.n_way * self.k_qry
losses_q = [0 for _ in range(self.update_step + 1)]
f1s = [0 for _ in range(self.update_step + 1)]
accs = [0 for _ in range(self.update_step + 1)]
recalls = [0 for _ in range(self.update_step + 1)]
precs = [0 for _ in range(self.update_step + 1)]
corrects = [0 for _ in range(self.update_step + 1)]
TP, TN, FN, FP = [], [], [], []
# self.net = deepcopy(self.net)
# for i in range(task_num):
for i in range(1):
# x_spt[i] = x_spt[i].cuda()
# y_spt[i] = y_spt[i].cuda()
# x_qry[i] = x_qry[i].cuda()
# y_qry[i] = y_qry[i].cuda()
x_spt[i] = x_spt[i]
y_spt[i] = y_spt[i]
x_qry[i] = x_qry[i]
y_qry[i] = y_qry[i]
logits_meta_train = self.net(x_spt[i], vars=None, bn_training=True)
with torch.no_grad():
logits_meta_val = self.net(x_qry[i], vars=None, bn_training=True)
if kd ==1:
distillation_loss = self.net.distillation(logits_meta_train,y_spt[i].squeeze(),teacher_score[i],logits_meta_val,temp=10.0,alpha=0.01) # distillation loss
grad = torch.autograd.grad(distillation_loss, self.net.parameters())
elif kd == 0:
loss = F.cross_entropy(logits_meta_train, y_spt[i].squeeze())
                grad = torch.autograd.grad(loss, self.net.parameters())  # compute gradients w.r.t. the current parameters
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, self.net.parameters())))
with torch.no_grad():
logits_q = self.net(x_qry[i], self.net.parameters(), bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())
losses_q[0] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()
corrects[0] = corrects[0] + correct
f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])
f1s[0] = f1s[0] + f1_sub
accs[0] = accs[0] + acc_sub
recalls[0] = recalls[0] + recall_sub
precs[0] = precs[0] + prec_sub
with torch.no_grad():
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())
losses_q[1] += loss_q
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item()
corrects[1] = corrects[1] + correct
f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])
f1s[1] = f1s[1] + f1_sub
accs[1] = accs[1] + acc_sub
recalls[1] = recalls[1] + recall_sub
precs[1] = precs[1] + prec_sub
for k in range(1, self.update_step):
logits = self.net(x_spt[i], fast_weights, bn_training=True)
with torch.no_grad():
logits_meta_val = self.net(x_qry[i], fast_weights, bn_training=True)
if kd == 1:
distillation_loss = self.net.distillation(logits, y_spt[i].squeeze(), teacher_score[i],logits_meta_val,
temp=10.0, alpha=0.01) # distillation loss
grad = torch.autograd.grad(distillation_loss, fast_weights)
elif kd == 0:
loss = F.cross_entropy(logits, y_spt[i].squeeze())
grad = torch.autograd.grad(loss, fast_weights)
# loss = F.cross_entropy(logits, y_spt[i].squeeze())
# grad = torch.autograd.grad(loss, fast_weights)
fast_weights = list(map(lambda p: p[1] - self.update_lr * p[0], zip(grad, fast_weights)))
#####################
logits_q = self.net(x_qry[i], fast_weights, bn_training=True)
loss_q = F.cross_entropy(logits_q, y_qry[i].squeeze())
# plot_data = np.hstack((logits_q.detach().numpy(),y_qry[i].detach().numpy()))
# np.savetxt(r'embedding_meta_plot.txt', plot_data, fmt="%.8e",
# delimiter=' ')
losses_q[k + 1] += loss_q
with torch.no_grad():
pred_q = F.softmax(logits_q, dim=1).argmax(dim=1)
correct = torch.eq(pred_q, y_qry[i].squeeze()).sum().item() # convert to numpy
corrects[k + 1] = corrects[k + 1] + correct
f1_sub, acc_sub, recall_sub, prec_sub = get_performance(logits_q, y_qry[i])
f1s[k + 1] = f1s[k + 1] + f1_sub
accs[k + 1] = accs[k + 1] + acc_sub
recalls[k + 1] = recalls[k + 1] + recall_sub
precs[k + 1] = precs[k + 1] + prec_sub
# TP.append(((pred_q == 1) & (y_qry[i].squeeze() == 1)).sum().item())
# TN.append(((pred_q == 0) & (y_qry[i].squeeze() == 0)).sum().item())
# FN.append(((pred_q == 0) & (y_qry[i].squeeze() == 1)).sum().item())
# FP.append(((pred_q == 1) & (y_qry[i].squeeze() == 0)).sum().item())
# loss_q = losses_q[-1] / task_num
# self.meta_optim.zero_grad()
# loss_q.backward(retain_graph=True)
# self.meta_optim.step()
# acc1 = np.array(corrects) / (querysz * task_num)
# TP_ave = np.mean(TP)
# TN_ave =np.mean(TN)
# FN_ave = np.mean(FN)
# FP_ave = np.mean(FP)
# p = TP_ave / (TP_ave + FP_ave)
# r = TP_ave / (TP_ave + FN_ave)
# F1 = 2 * r * p / (r + p).data
# acc = (TP_ave + TN_ave) / (TP_ave + TN_ave + FP_ave + FN_ave)
loss_q = losses_q[-1] / task_num
# self.meta_optim.zero_grad()
# loss_q.backward(retain_graph = True)
# self.meta_optim.step()
# acc1 = np.array(corrects) / (querysz * task_num)
#
# precision = np.array(precs) / (task_num)
# recall = np.array(recalls) / (task_num)
f1 = np.array(f1s) / (task_num)
acc = np.array(accs) / (task_num)
        # return loss_q, acc1, precision, recall, f1, acc2,p,r,F1,acc,TP_ave,TN_ave,FN_ave,FP_ave # lists recording the accuracy at each inner-loop update step over all batch data
return acc,f1
def predict(self, x_spt):
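        """Return softmax scores of the current network on each support set,
        computed without gradients (used as teacher scores for distillation)."""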
task_num = self.task_num
teacher_score_list = [0 for _ in range(task_num )]
with torch.no_grad():
# for i in range(task_num):
for i in range(1):
logits = self.net(x_spt[i], vars=self.net.parameters(), bn_training=True)
teacher_score_list[i] = F.softmax(logits,dim=1)
return teacher_score_list | {"hexsha": "cf9248a769d68171e24266e95a35c4f4274da545", "size": 8386, "ext": "py", "lang": "Python", "max_stars_repo_path": "metalearning/meta_update.py", "max_stars_repo_name": "yyyqqq5/MetaHG", "max_stars_repo_head_hexsha": "fa5fa36fdda4a491d68c8e66b70e2e34a7eb356d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "metalearning/meta_update.py", "max_issues_repo_name": "yyyqqq5/MetaHG", "max_issues_repo_head_hexsha": "fa5fa36fdda4a491d68c8e66b70e2e34a7eb356d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "metalearning/meta_update.py", "max_forks_repo_name": "yyyqqq5/MetaHG", "max_forks_repo_head_hexsha": "fa5fa36fdda4a491d68c8e66b70e2e34a7eb356d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.93, "max_line_length": 170, "alphanum_fraction": 0.5366086334, "include": true, "reason": "import numpy", "num_tokens": 2278} |
import numpy as np
import cvxpy as cvx
import util
def set_contains_array(S, a):
"""
:param S: list of np.ndarray
:param a: np.ndarray
:return: contains, 0 or 1
"""
contains = 0
for b in S:
if not (a - b).any(): # if a contained in S
contains = 1
return contains
def set_sum_two(A, B):
"""
:param A: list of np.ndarray
:param B: list of np.ndarray
:return: list of np.ndarray
"""
C = []
for a in A:
for b in B:
if not set_contains_array(C, a + b):
C.append(a + b)
return C
def set_sum_list(Omega):
"""
Set sum of multiple set of np.ndarray
:param Omega: list of list of np.ndarray
:return: list of np.ndarray
"""
S = Omega[0]
# print 'len(Omega) =', len(Omega)
# print 0, 'S =', S
for i in range(1, len(Omega)):
# print i, 'Omega[i] =',Omega[i]
S = set_sum_two(S, Omega[i])
# print i, 'S =', S
return S
def pointwise_dominate(w, U):
"""
    Test if w is point-wise dominated by some u in U
:param w: np.ndarray
:param U: list of np.ndarray
:return:
"""
for u in U:
if np.all(w < u):
return True
return False
def lp_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With LP in White & Clark
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
# print("LP dominate")
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
    constraints = [b.T*(w-u) >= d for u in U] + [cvx.sum(b) == 1, b >= 0]
prob = cvx.Problem(objective, constraints)
result = prob.solve()
# print("d =", d.value)
if d.value >= 0:
return np.ravel(b.value)
else:
return None
def dec_dominate(w, U):
"""
Computes the belief in which w improves U the most.
With Bender's decomposition (Walraven & Spaan, 2017)
:param w: np.ndarray
:param U: list of np.ndarray
:return: b if d >= 0 else None
"""
if len(U) == 0:
return w
S = len(w)
d = cvx.Variable()
b = cvx.Variable(S)
objective = cvx.Maximize(d)
# print("U", U)
    constraints = [cvx.sum(b) == 1, b >= 0]
b_ = np.random.random(S)
b_ = b_ / np.sum(b_)
U_ = []
while 1:
_b = b_
u_ = U[np.argmin([np.dot((w - U[i]), _b) for i in range(len(U))])]
constraints += [d <= b.T*(w-u_)]
U_.append(u_)
prob = cvx.Problem(objective, constraints)
_ = prob.solve()
b_ = np.ravel(b.value)
if not (b_ - _b).any():
break
if d.value >= 0:
return _b
else:
return None
def lex_less(u, w):
if w is None:
return False
for i in range(len(u)):
if u[i] > w[i]:
return False
return True
def best_point(b, U):
# print("Find best")
_max = -np.inf
w = None
for i in range(len(U)):
u = U[i]
# print("b", b)
# print("u", u)
x = np.dot(b, u)
# print("x", x)
if x > _max or (x == _max and lex_less(u, U[w])):
w = i
_max = x
# print("max", _max)
return w
def prune(W, A=None):
# print("prune", W)
D, E = [], []
while len(W) > 0:
w = W[-1]
if pointwise_dominate(w, D):
W.pop()
else:
# b = lp_dominate(w, D)
b = dec_dominate(w, D)
if b is None:
W.pop()
else:
i = best_point(b, W)
D.append(W[i])
if A is not None:
E.append(A[i])
W.pop(i)
if A is not None:
return D, E
else:
return D
def set_union(V):
V_ = []
for v in V:
V_ += v
return V_
class POMDP:
def __init__(self, P=None, Z=None, R=None, g=None, alpha=1.0):
self.P = P # m x n x n: a(t)->s(t)->s(t+1)
self.Z = Z # m x n x k: a(t)->s(t+1)->o(t+1)
        self.R = R  # m x n x n: a(t)->s(t)->s(t+1)
self.g = g # n x 1: s(T)
self.alpha = alpha # discount factor
self.nActions = self.Z.shape[0] # m
self.nStates = self.Z.shape[1] # n
self.nLevels = self.Z.shape[2] # k
if g is None:
self.g = np.zeros(self.nStates)
# print self.nActions, self.nStates, self.nLevels
def update_belief(self, b, a, o):
p = self.Z[a, :, o] * self.P[a].T.dot(b)
return p / p.sum()
def monahan_enumeration(self, V):
"""construct the set of Omega
:param V: input list of alpha vectors
"""
V_, A_ = [], []
for a in range(self.nActions):
# print("Action", a)
Va = []
_r = np.sum(self.P[a] * self.R[a], axis=1) / self.nLevels
# print("_r:", _r)
for z in range(self.nLevels):
# print("Obs", z)
Vaz = [_r + self.alpha * (self.Z[a,:,z] * v).dot(self.P[a]) for v in V]
# print("Vaz", Vaz)
if len(Va) > 0:
Va = prune(set_sum_two(Va, Vaz)) # incremental pruning
else:
Va = Vaz
A_ += [a for _ in Va]
V_ += Va
V_, A_ = prune(V_, A_)
return V_, A_
def transition(self, a, s):
return np.random.choice(self.nStates, p=self.P[a, s])
def emmission(self, a, s):
        return np.random.choice(self.nLevels, p=self.Z[a, s])
@staticmethod
def optimal_action(b, V, A):
assert len(V) == len(A)
values = [np.dot(b, v) for v in V]
opt_idx = np.argmax(values)
return A[opt_idx], V[opt_idx]
def solve(self, T):
V = self.g
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T-1-t] = V
Actions[T-1-t] = A
return Values, Actions
def plan(self, T, initial_belief=None, perform=False):
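        """Run T steps of value iteration and return the greedy first action
        and its alpha vector at the initial belief.  If perform is True, also
        simulate one T-step trajectory, updating the belief after each
        observation, and additionally return the visited actions, states,
        observations and the discounted reward."""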
V = self.g
if initial_belief is None:
initial_belief = np.ones(self.nStates) / self.nStates
b = initial_belief
Values = [None for _ in range(T)] + [[self.g]]
Actions = [None for _ in range(T)]
for t in range(T):
V, A = self.monahan_enumeration(V)
Values[T - 1 - t] = V
Actions[T - 1 - t] = A
a0, v0 = self.optimal_action(b, Values[0], Actions[0])
if not perform:
return a0, v0
s = np.random.choice(self.nStates, p=b)
actions, states, observations, reward = [], [], [], 0.0
for t in range(T):
a, v = self.optimal_action(b, Values[t], Actions[t])
# print('a', a)
# print('v', v)
_s = s
s = self.transition(a, s)
            o = self.emmission(a, s)
b = self.update_belief(b, a, o)
states.append(_s)
            actions.append(a)
observations.append(o)
reward += self.R[a, _s, s] * self.alpha ** t
return a0, v0, actions, states, observations, reward
def test_pomdp(nActions, nStates, nLevels, alpha):
# P = np.array([
# [[0.25, 0.75], [0.6 , 0.4 ]],
# [[0.5 , 0.5 ], [0.7 , 0.3 ]]])
# Z = np.array([
# [[0.55, 0.45], [0.3 , 0.7 ]],
# [[0.65, 0.35], [0.25, 0.75]]])
# R = np.array([
# [[2., 2. ], [ 0., 0.]],
# [[3., 3. ], [-1., -1.]]])
# g = np.array([2., -1.])
P = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
Z = util.normalize(np.random.random(size=(nActions, nStates, nLevels)), axis=2)
R = util.normalize(np.random.random(size=(nActions, nStates, nStates)), axis=2)
g = util.normalize(np.random.random(size=(nStates)), axis=0)
pomdp = POMDP(P, Z, R, g, alpha)
T = 10
V = pomdp.g
a0, v0 = pomdp.plan(T, initial_belief=None, perform=False)
# a0, v0, actions, states, observations, reward = pomdp.plan(T, initial_belief=None, perform=True)
# print('a0 =', a0, 'v0 =', v0)
# print('actions:', actions)
# print('states:', states)
# print('observations:', observations)
# print('reward:', reward)
# for t in range(T):
# print("Iteration", t+1)
# V, A = pomdp.monahan_enumeration(V)
# for v, a in zip(V, A):
# print(v, a)
if __name__ == "__main__":
# import timeit
# print(timeit.timeit("main()"))
import time
for s in range(123, 133):
start_time = time.time()
np.random.seed(s)
print("===== SEED %d =====" %(s))
test_pomdp(nActions=2, nStates=3, nLevels=3, alpha=0.9975)
end_time = time.time()
print(end_time - start_time)
| {"hexsha": "8c3ed5966f5972419e367c22f46baad392ad9753", "size": 8958, "ext": "py", "lang": "Python", "max_stars_repo_path": "pomdp.py", "max_stars_repo_name": "gongjue/pocm", "max_stars_repo_head_hexsha": "1f8ae819aaa7fa5f25878a0662a23cb457c1180b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pomdp.py", "max_issues_repo_name": "gongjue/pocm", "max_issues_repo_head_hexsha": "1f8ae819aaa7fa5f25878a0662a23cb457c1180b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pomdp.py", "max_forks_repo_name": "gongjue/pocm", "max_forks_repo_head_hexsha": "1f8ae819aaa7fa5f25878a0662a23cb457c1180b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.1698113208, "max_line_length": 102, "alphanum_fraction": 0.4934137084, "include": true, "reason": "import numpy,import cvxpy", "num_tokens": 2724} |
!---------------------------------------------------------------------!
! PHAML !
! !
! The Parallel Hierarchical Adaptive MultiLevel code for solving !
! linear elliptic partial differential equations of the form !
! (PUx)x + (QUy)y + RU = F on 2D polygonal domains with mixed !
! boundary conditions, and eigenvalue problems where F is lambda*U. !
! !
! PHAML is public domain software. It was produced as part of work !
! done by the U.S. Government, and is not subject to copyright in !
! the United States. !
! !
! William F. Mitchell !
! Applied and Computational Mathematics Division !
! National Institute of Standards and Technology !
! [email protected] !
! http://math.nist.gov/phaml !
! !
!---------------------------------------------------------------------!
module lapack_solve
!----------------------------------------------------
! This module contains routines for solving linear systems with LAPACK
!
! communication tags in this module are of the form 17xx
!----------------------------------------------------
!----------------------------------------------------
! Other modules used are:
use global
use message_passing
use linsystype_mod
use linsys_util
!----------------------------------------------------
implicit none
private
public make_lapack_symm_band, make_lapack_gen_band, destroy_lapack_band, &
lapack_spd, lapack_indef, lapack_precon
contains
! ---------------------
subroutine make_lapack_symm_band(use_nlev,phaml_matrix,lapack_matrix)
! ---------------------
!----------------------------------------------------
! This routine makes and factorizes a LAPACK symmetric band matrix from the
! first use_nlev refinement levels of the PHAML matrix, which should be in
! nodal form for level use_nlev. Dirichlet equations are omitted; make
! sure they are accounted for when setting up the right hand side.
! halfbandwidth does not include the diagonal. lapack_matrix%rhs is
! allocated but not set.
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
integer, intent(in) :: use_nlev
type(linsys_type), intent(in) :: phaml_matrix
type(lapack_band_matrix), intent(out) :: lapack_matrix
!----------------------------------------------------
! Local variables:
integer :: i, j, k, num_nonzero, workd, astat, counter, space, jerr, profil, &
orig_bandwd, row, col, info
integer :: nodir_renum(phaml_matrix%neq), nodir_inv_renum(phaml_matrix%neq), &
degree(phaml_matrix%neq), rstart(phaml_matrix%neq), &
minband_renum(phaml_matrix%neq)
integer, allocatable :: connec(:), work(:)
!----------------------------------------------------
! Begin executable code
! count the number of equations without Dirichlet points, and the number
! of nonzeroes in the non-Dirichlet rows of mat, and create a renumbering
! that omits Dirichlet points.
lapack_matrix%neq = 0
num_nonzero = 0
nodir_renum = -1
do i=1,use_nlev
do j=phaml_matrix%begin_level(i),phaml_matrix%begin_level(i+1)-1
if (phaml_matrix%equation_type(j) == DIRICHLET) cycle
lapack_matrix%neq = lapack_matrix%neq + 1
nodir_renum(j) = lapack_matrix%neq
nodir_inv_renum(lapack_matrix%neq) = j
do k=phaml_matrix%begin_row(j),phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
num_nonzero = num_nonzero + 1
end do
end do
end do
! nullify all array components of lapack_matrix
nullify(lapack_matrix%matrix,lapack_matrix%rhs,lapack_matrix%renum, &
lapack_matrix%inv_renum,lapack_matrix%ipiv)
! if no equations, never use lapack_matrix so don't continue
if (lapack_matrix%neq == 0) return
! create column pointers for the matrix with Dirichlet points removed
allocate(connec(num_nonzero-lapack_matrix%neq),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_symm_band")
return
endif
orig_bandwd = 0
counter = 0
do i=1,lapack_matrix%neq
j = nodir_inv_renum(i)
rstart(i) = counter + 1
do k=phaml_matrix%begin_row(j)+1,phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
counter = counter + 1
connec(counter) = nodir_renum(phaml_matrix%column_index(k))
orig_bandwd = max(orig_bandwd,abs(connec(counter)-i))
end do
degree(i) = counter - rstart(i) + 1
end do
! Use CALGO 582 (Gibbs, Poole and Stockmeyer) to find a bandwidth reduction
! ordering
workd = 6*lapack_matrix%neq+3
allocate(work(workd),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_symm_band")
return
endif
minband_renum = (/ (i,i=1,phaml_matrix%neq) /)
call gpskca(lapack_matrix%neq,degree,rstart,connec,.false.,workd,minband_renum,&
work,lapack_matrix%halfbandwidth,profil,jerr,space)
if (jerr /= 0) then
work(1:lapack_matrix%neq) = (/ (i,i=1,lapack_matrix%neq) /) ! contains minband_inv_renum
minband_renum(1:lapack_matrix%neq) = (/ (i,i=1,lapack_matrix%neq) /)
lapack_matrix%halfbandwidth = orig_bandwd
call warning("Bandwidth reduction reordering routine failed.", &
"Rediculously large bandwidth may cause allocation failure or long run time.")
endif
! done with column pointers for reduced array
deallocate(connec)
! compose nodir and minband renumberings
! renumbering and inverse renumbering of the equations. renum(i) is the
! equation number in the symmetric matrix corresponding to original equation i;
! dimension renum(neq(use_nlev)); renum(i) = -1 if i is a Dirichlet point.
! inv_renum(i) is the original equation number corresponding to the symmetric
! matrix equation i; dimension inv_renum(symm_neq).
allocate(lapack_matrix%renum(phaml_matrix%neq), &
lapack_matrix%inv_renum(lapack_matrix%neq),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_symm_band")
return
endif
do i=1,phaml_matrix%neq
if (nodir_renum(i) == -1) then
lapack_matrix%renum(i) = -1
else
lapack_matrix%renum(i) = minband_renum(nodir_renum(i))
endif
end do
do i=1,lapack_matrix%neq
lapack_matrix%inv_renum(i) = nodir_inv_renum(work(i))
end do
! done with work space for gpskca
deallocate(work)
! Copy matrix values to symmetric band form, upper triangle version
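! (LAPACK upper-triangle band storage: entry (row,col) with row <= col is held
! in matrix(halfbandwidth+1+row-col,col), as required by _PBTRF/_PBTRS)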
allocate(lapack_matrix%matrix(lapack_matrix%halfbandwidth+1,lapack_matrix%neq),&
stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_symm_band")
return
endif
lapack_matrix%matrix = 0.0_my_real
do i=1,use_nlev
do j=phaml_matrix%begin_level(i),phaml_matrix%begin_level(i+1)-1
if (phaml_matrix%equation_type(j) == DIRICHLET) cycle
row = lapack_matrix%renum(j)
if (row == -1) cycle
do k=phaml_matrix%begin_row(j),phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
col = lapack_matrix%renum(phaml_matrix%column_index(k))
if (col == -1) cycle
if (col < row) cycle
lapack_matrix%matrix(lapack_matrix%halfbandwidth+1+row-col,col) = &
phaml_matrix%matrix_val(k)
end do
end do
end do
! Factor the matrix
if (my_real == kind(0.0e0)) then
call spbtrf("U",lapack_matrix%neq,lapack_matrix%halfbandwidth, &
lapack_matrix%matrix,lapack_matrix%halfbandwidth+1,info)
elseif (my_real == kind(0.0d0)) then
call dpbtrf("U",lapack_matrix%neq,lapack_matrix%halfbandwidth, &
lapack_matrix%matrix,lapack_matrix%halfbandwidth+1,info)
else
ierr = PHAML_INTERNAL_ERROR
call fatal("my_real is neither default single nor double precision")
return
endif
if (info /= 0) then
ierr = PHAML_INTERNAL_ERROR
call fatal("LAPACK SPBTRF failed.",intlist=(/info/))
return
endif
! allocate the rhs
allocate(lapack_matrix%rhs(lapack_matrix%neq,1),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_symm_band")
return
endif
end subroutine make_lapack_symm_band
! --------------------
subroutine make_lapack_gen_band(use_nlev,phaml_matrix,lapack_matrix)
! --------------------
!----------------------------------------------------
! This routine makes and factorizes a LAPACK general band matrix from the
! first use_nlev refinement levels of the PHAML matrix, which should be in
! nodal form for level use_nlev. Dirichlet equations are omitted; make
! sure they are accounted for when setting up the right hand side.
! halfbandwidth does not include the diagonal. lapack_matrix%rhs is
! allocated but not set.
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
integer, intent(in) :: use_nlev
type(linsys_type), intent(in) :: phaml_matrix
type(lapack_band_matrix), intent(out) :: lapack_matrix
!----------------------------------------------------
! Local variables:
integer :: i, j, k, num_nonzero, workd, astat, counter, space, jerr, profil, &
orig_bandwd, row, col, info
integer :: nodir_renum(phaml_matrix%neq), nodir_inv_renum(phaml_matrix%neq), &
degree(phaml_matrix%neq), rstart(phaml_matrix%neq), &
minband_renum(phaml_matrix%neq)
integer, allocatable :: connec(:), work(:)
!----------------------------------------------------
! Begin executable code
! count the number of equations without Dirichlet points, and the number
! of nonzeroes in the non-Dirichlet rows of mat, and create a renumbering
! that omits Dirichlet points.
lapack_matrix%neq = 0
num_nonzero = 0
nodir_renum = -1
do i=1,use_nlev
do j=phaml_matrix%begin_level(i),phaml_matrix%begin_level(i+1)-1
if (phaml_matrix%equation_type(j) == DIRICHLET) cycle
lapack_matrix%neq = lapack_matrix%neq + 1
nodir_renum(j) = lapack_matrix%neq
nodir_inv_renum(lapack_matrix%neq) = j
do k=phaml_matrix%begin_row(j),phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
num_nonzero = num_nonzero + 1
end do
end do
end do
! nullify all array components of lapack_matrix
nullify(lapack_matrix%matrix,lapack_matrix%rhs,lapack_matrix%renum, &
lapack_matrix%inv_renum,lapack_matrix%ipiv)
! if no equations, never use lapack_matrix so don't continue
if (lapack_matrix%neq == 0) return
! create column pointers for the matrix with Dirichlet points removed
allocate(connec(num_nonzero-lapack_matrix%neq),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_gen_band")
return
endif
orig_bandwd = 0
counter = 0
do i=1,lapack_matrix%neq
j = nodir_inv_renum(i)
rstart(i) = counter + 1
do k=phaml_matrix%begin_row(j)+1,phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
counter = counter + 1
connec(counter) = nodir_renum(phaml_matrix%column_index(k))
orig_bandwd = max(orig_bandwd,abs(connec(counter)-i))
end do
degree(i) = counter - rstart(i) + 1
end do
! Use CALGO 582 (Gibbs, Poole and Stockmeyer) to find a bandwidth reduction
! ordering
workd = 6*lapack_matrix%neq+3
allocate(work(workd),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_gen_band")
return
endif
work = 0 ! BUG workaround for bug in gpskca; seems to have assumed initial 0's
minband_renum = (/ (i,i=1,phaml_matrix%neq) /)
call gpskca(lapack_matrix%neq,degree,rstart,connec,.false.,workd,minband_renum,&
work,lapack_matrix%halfbandwidth,profil,jerr,space)
if (jerr /= 0) then
work(1:lapack_matrix%neq) = (/ (i,i=1,lapack_matrix%neq) /) ! contains minband_inv_renum
minband_renum(1:lapack_matrix%neq) = (/ (i,i=1,lapack_matrix%neq) /)
lapack_matrix%halfbandwidth = orig_bandwd
call warning("Bandwidth reduction reordering routine failed.", &
"Rediculously large bandwidth may cause allocation failure or long run time.")
endif
! done with column pointers for reduced array
deallocate(connec)
! compose nodir and minband renumberings
! renumbering and inverse renumbering of the equations. renum(i) is the
! equation number in the band matrix corresponding to original equation i;
! dimension renum(neq(use_nlev)); renum(i) = -1 if i is a Dirichlet point.
! inv_renum(i) is the original equation number corresponding to the band
! matrix equation i; dimension inv_renum(gen_neq).
allocate(lapack_matrix%renum(phaml_matrix%neq),lapack_matrix%inv_renum(lapack_matrix%neq),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_gen_band")
return
endif
do i=1,phaml_matrix%neq
if (nodir_renum(i) == -1) then
lapack_matrix%renum(i) = -1
else
lapack_matrix%renum(i) = minband_renum(nodir_renum(i))
endif
end do
do i=1,lapack_matrix%neq
lapack_matrix%inv_renum(i) = nodir_inv_renum(work(i))
end do
! done with work space for gpskca
deallocate(work)
! Copy matrix values to general band form with extra space for factorization
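! (LAPACK general band storage for _GBTRF with kl=ku=halfbandwidth: entry
! (row,col) is held in matrix(2*halfbandwidth+1+row-col,col); the leading
! dimension 3*halfbandwidth+1 leaves room for fill-in from partial pivoting)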
allocate(lapack_matrix%matrix(3*lapack_matrix%halfbandwidth+1,lapack_matrix%neq), &
lapack_matrix%ipiv(lapack_matrix%neq),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_gen_band")
return
endif
lapack_matrix%matrix = 0.0_my_real
do i=1,use_nlev
do j=phaml_matrix%begin_level(i),phaml_matrix%begin_level(i+1)-1
if (phaml_matrix%equation_type(j) == DIRICHLET) cycle
row = lapack_matrix%renum(j)
if (row == -1) cycle
do k=phaml_matrix%begin_row(j),phaml_matrix%end_row(j)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) cycle
if (phaml_matrix%column_index(k) >= phaml_matrix%begin_level(use_nlev+1)) cycle
col = lapack_matrix%renum(phaml_matrix%column_index(k))
if (col == -1) cycle
lapack_matrix%matrix(2*lapack_matrix%halfbandwidth+1+row-col,col) = &
phaml_matrix%matrix_val(k)
end do
end do
end do
! Factor the matrix
if (my_real == kind(0.0e0)) then
call sgbtrf(lapack_matrix%neq,lapack_matrix%neq,lapack_matrix%halfbandwidth,&
lapack_matrix%halfbandwidth,lapack_matrix%matrix, &
3*lapack_matrix%halfbandwidth+1,lapack_matrix%ipiv,info)
elseif (my_real == kind(0.0d0)) then
call dgbtrf(lapack_matrix%neq,lapack_matrix%neq,lapack_matrix%halfbandwidth,&
lapack_matrix%halfbandwidth,lapack_matrix%matrix, &
3*lapack_matrix%halfbandwidth+1,lapack_matrix%ipiv,info)
else
ierr = PHAML_INTERNAL_ERROR
call fatal("my_real is neither default single nor double precision")
return
endif
if (info /= 0) then
ierr = PHAML_INTERNAL_ERROR
call fatal("LAPACK SGBTRF failed.",intlist=(/info/))
return
endif
! allocate the rhs
allocate(lapack_matrix%rhs(lapack_matrix%neq,1),stat=astat)
if (astat /= 0) then
ierr = ALLOC_FAILED
call fatal("allocation failed in make_lapack_gen_band")
return
endif
end subroutine make_lapack_gen_band
! -------------------
subroutine destroy_lapack_band(lapack_matrix)
! -------------------
!----------------------------------------------------
! This routine gets rid of the band storage of a matrix
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
type(lapack_band_matrix), intent(inout) :: lapack_matrix
!----------------------------------------------------
! Local variables:
integer :: astat
!----------------------------------------------------
! Begin executable code
if (associated(lapack_matrix%matrix)) deallocate(lapack_matrix%matrix, stat=astat)
if (associated(lapack_matrix%rhs)) deallocate(lapack_matrix%rhs, stat=astat)
if (associated(lapack_matrix%renum)) deallocate(lapack_matrix%renum, stat=astat)
if (associated(lapack_matrix%inv_renum)) deallocate(lapack_matrix%inv_renum, stat=astat)
if (associated(lapack_matrix%ipiv)) deallocate(lapack_matrix%ipiv, stat=astat)
end subroutine destroy_lapack_band
! ----------
subroutine lapack_spd(use_nlev,phaml_matrix,lapack_matrix)
! ----------
!----------------------------------------------------
! This routine solves the linear system using the LAPACK routines for
! symmetric positive definite band matricies.
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
integer, intent(in) :: use_nlev
type(linsys_type), intent(inout) :: phaml_matrix
type(lapack_band_matrix), intent(inout) :: lapack_matrix
!----------------------------------------------------
! Local variables:
integer :: info,i,j,k,lev
!----------------------------------------------------
! Begin executable code
! if the number of equations is 0, there is nothing to solve
if (lapack_matrix%neq == 0) return
! Create the right hand side, by moving to the new numbering and eliminating
! Dirichlet boundary conditions
do lev=1,use_nlev
do i=phaml_matrix%begin_level(lev),phaml_matrix%begin_level(lev+1)-1
if (phaml_matrix%equation_type(i) == DIRICHLET) cycle
j = lapack_matrix%renum(i)
if (j == -1) cycle
lapack_matrix%rhs(j,1) = phaml_matrix%rhs(i) + phaml_matrix%r_mine(i) + &
phaml_matrix%r_others(i)
do k=phaml_matrix%begin_row(i),phaml_matrix%end_row(i)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) then
lapack_matrix%rhs(j,1)=lapack_matrix%rhs(j,1) - &
phaml_matrix%matrix_val(k)*phaml_matrix%solution(phaml_matrix%column_index(k))
endif
end do
end do
end do
! Solve the system
if (my_real == kind(0.0e0)) then
call spbtrs("U",lapack_matrix%neq,lapack_matrix%halfbandwidth,1, &
lapack_matrix%matrix,lapack_matrix%halfbandwidth+1, &
lapack_matrix%rhs,lapack_matrix%neq,info)
elseif (my_real == kind(0.0d0)) then
call dpbtrs("U",lapack_matrix%neq,lapack_matrix%halfbandwidth,1, &
lapack_matrix%matrix,lapack_matrix%halfbandwidth+1, &
lapack_matrix%rhs,lapack_matrix%neq,info)
else
ierr = PHAML_INTERNAL_ERROR
call fatal("my_real is neither default single nor double precision")
return
endif
if (info /= 0) then
ierr = PHAML_INTERNAL_ERROR
call fatal("LAPACK SPBTRS failed.",intlist=(/info/))
return
endif
! Copy the solution into the solution vector
do i=1,lapack_matrix%neq
phaml_matrix%solution(lapack_matrix%inv_renum(i)) = lapack_matrix%rhs(i,1)
end do
end subroutine lapack_spd
! ------------
subroutine lapack_indef(use_nlev,phaml_matrix,lapack_matrix)
! ------------
!----------------------------------------------------
! This routine solves the linear system using the LAPACK routines for
! general band matrices.
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
integer, intent(in) :: use_nlev
type(linsys_type), intent(inout) :: phaml_matrix
type(lapack_band_matrix), intent(inout) :: lapack_matrix
!----------------------------------------------------
! Local variables:
integer :: info,i,j,k,lev
!----------------------------------------------------
! Begin executable code
! if the number of equations is 0, there is nothing to solve
if (lapack_matrix%neq == 0) return
! Create the right hand side, by moving to the new numbering and eliminating
! Dirichlet boundary conditions
do lev=1,use_nlev
do i=phaml_matrix%begin_level(lev),phaml_matrix%begin_level(lev+1)-1
if (phaml_matrix%equation_type(i) == DIRICHLET) cycle
j = lapack_matrix%renum(i)
if (j == -1) cycle
lapack_matrix%rhs(j,1) = phaml_matrix%rhs(i) + phaml_matrix%r_mine(i) + &
phaml_matrix%r_others(i)
do k=phaml_matrix%begin_row(i),phaml_matrix%end_row(i)
if (phaml_matrix%column_index(k) == NO_ENTRY) cycle
if (phaml_matrix%equation_type(phaml_matrix%column_index(k)) == DIRICHLET) then
lapack_matrix%rhs(j,1)=lapack_matrix%rhs(j,1) - &
phaml_matrix%matrix_val(k)*phaml_matrix%solution(phaml_matrix%column_index(k))
endif
end do
end do
end do
! Solve the system
if (my_real == kind(0.0e0)) then
call sgbtrs("N",lapack_matrix%neq,lapack_matrix%halfbandwidth, &
lapack_matrix%halfbandwidth,1,lapack_matrix%matrix, &
3*lapack_matrix%halfbandwidth+1,lapack_matrix%ipiv, &
lapack_matrix%rhs,lapack_matrix%neq,info)
elseif (my_real == kind(0.0d0)) then
call dgbtrs("N",lapack_matrix%neq,lapack_matrix%halfbandwidth, &
lapack_matrix%halfbandwidth,1,lapack_matrix%matrix, &
3*lapack_matrix%halfbandwidth+1,lapack_matrix%ipiv, &
lapack_matrix%rhs,lapack_matrix%neq,info)
else
ierr = PHAML_INTERNAL_ERROR
call fatal("my_real is neither default single nor double precision")
return
endif
if (info /= 0) then
ierr = PHAML_INTERNAL_ERROR
call fatal("LAPACK _GBTRS failed.",intlist=(/info/))
return
endif
! Copy the solution into the solution vector
do i=1,lapack_matrix%neq
phaml_matrix%solution(lapack_matrix%inv_renum(i)) = lapack_matrix%rhs(i,1)
end do
end subroutine lapack_indef
! -------------
subroutine lapack_precon(invec,outvec,choice,matrix,procs, &
solver_cntl,still_sequential)
! -------------
!----------------------------------------------------
! This routine applies a preconditioner based on LAPACK, either using
! a solve on the coarse grid or a domain decomposition.
!----------------------------------------------------
!----------------------------------------------------
! Dummy arguments
real(my_real), intent(in) :: invec(:)
real(my_real), intent(out) :: outvec(:)
integer, intent(in) :: choice
type(linsys_type), intent(inout) :: matrix
type(proc_info), intent(in) :: procs
type(solver_options), intent(in) :: solver_cntl
logical, intent(in) :: still_sequential
!----------------------------------------------------
! Local variables:
real(my_real) :: holdrhs(matrix%neq), &
holdsoln(0:matrix%neq)
!----------------------------------------------------
! Begin executable code
! Keep rhs and solution
holdrhs = matrix%rhs
holdsoln = matrix%solution
! Copy the invec to rhs; the size of invec should be the same as rhs
matrix%rhs = invec
! Set the initial guess to 0.0
matrix%solution(1:) = 0.0_my_real
! Set Dirichlet points
where (matrix%equation_type == DIRICHLET) &
matrix%solution(1:) = matrix%rhs
! Call the selected preconditioner
select case(choice)
case (FUDOP_DD_PRECONDITION)
call fudop_dd_precon(matrix,procs,solver_cntl,still_sequential)
case (COARSE_GRID_PRECONDITION)
call coarse_precon(matrix,solver_cntl)
end select
! Copy solution (which now contains the preconditioner times invec) to outvec
outvec = matrix%solution(1:)
! Restore rhs and solution
matrix%rhs = holdrhs
matrix%solution = holdsoln
end subroutine lapack_precon
! ---------------
subroutine fudop_dd_precon(matrix,procs,solver_cntl, &
still_sequential)
! ---------------
!----------------------------------------------------
! This routine performs a fudop domain decomposition preconditioner.
! Each processor solves its local problem exactly with LAPACK indefinite
! solver. It then obtains the solution for unowned equations from the owner,
! makes the unowned points Dirichlet points, and solves again. Obtaining the
! solution from other processors and re-solving is iterated some number of times.
!----------------------------------------------------
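! Outline of one application (as implemented below):
!   1) solve the local problem with lapack_indef
!   2) repeat dd_iterations times:
!      a) form the global residual, zeroed at equations this processor does not own
!      b) exchange the residual with other processors (in the hierarchical basis)
!      c) solve the local error-correction problem and add the correction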
!----------------------------------------------------
! Dummy arguments
type(linsys_type), intent(inout) :: matrix
type(proc_info), intent(in) :: procs
type(solver_options), intent(in) :: solver_cntl
logical, intent(in) :: still_sequential
!----------------------------------------------------
! Local variables:
logical :: gen_band_already_existed
integer :: i, ddit, toplev
real(my_real) :: resid(matrix%neq),hold_rhs(matrix%neq),hold_soln(matrix%neq)
!----------------------------------------------------
! Begin executable code
! set the top level of the hierarchy based on the number of equations
if (matrix%neq == matrix%neq_vert) then
toplev = matrix%nlev
elseif (matrix%neq == matrix%neq_vert+matrix%neq_edge) then
toplev = matrix%nlev+1
else
toplev = matrix%nlev+2
endif
! make a LAPACK general band matrix if it doesn't already exist
if (.not. matrix%lapack_gen_band_exists) then
gen_band_already_existed = .false.
call make_lapack_gen_band(toplev,matrix,matrix%lapack_mat)
matrix%lapack_gen_band_exists = .true.
else
gen_band_already_existed = .true.
endif
! call lapack solver with the local matrix
call lapack_indef(toplev,matrix,matrix%lapack_mat)
! Domain decomposition iterations
do ddit = 1,solver_cntl%dd_iterations
! Compute the global residual and set to 0 at points I don't own
call matrix_times_vector(matrix%solution(1:),resid,matrix,procs, &
still_sequential,1711,1712,1713,1714,1715,1716, &
nocomm2=.true.)
do i=1,matrix%neq
if (matrix%iown(i)) then
resid(i) = matrix%rhs(i) - resid(i)
else
resid(i) = 0
endif
end do
! Set the rhs to the residual and Dirichlet points to 0, to set up an
! error correction problem
hold_rhs = matrix%rhs
hold_soln = matrix%solution(1:)
matrix%rhs = resid
where (matrix%equation_type == DIRICHLET) matrix%solution(1:) = 0.0_my_real
! convert to hierarchical basis, exchange rhs with other processors, and
! convert back to nodal basis
if (.not. still_sequential) then
call basis_change(toplev,TO_HIER,matrix)
call exchange_fudop_vect(matrix%rhs,procs,matrix,1701,1702,1703)
call basis_change(toplev,TO_NODAL,matrix)
endif
! Solve the system
call lapack_indef(toplev,matrix,matrix%lapack_mat)
! Add correction to solution, and reset rhs and Dirichlet b.c. values
matrix%solution(1:) = hold_soln + matrix%solution(1:)
matrix%rhs = hold_rhs
end do
! Get rid of the band matrix if this routine created it
if (.not. gen_band_already_existed) then
call destroy_lapack_band(matrix%lapack_mat)
matrix%lapack_gen_band_exists = .false.
endif
end subroutine fudop_dd_precon
! -------------
subroutine coarse_precon(matrix,solver_cntl)
! -------------
!----------------------------------------------------
! This routine uses LAPACK as a preconditioner by coarsening to a problem
! small enough to solve with LAPACK and interpolating back to the fine grid.
! Each processor uses LAPACK on the grid it sees with no communication.
! Extraction of the equations it owns and combination with other
! processors' results are the responsibility of the caller.
!----------------------------------------------------
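! Outline (as implemented below): change basis to coarsen the grid to a size
! controlled by coarse_size, solve that system with lapack_indef, interpolate
! back to the fine grid, then do one Gauss-Seidel sweep to move the solution
! out of the coarse subspace.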
!----------------------------------------------------
! Dummy arguments
type(linsys_type), intent(inout) :: matrix
type(solver_options), intent(in) :: solver_cntl
!----------------------------------------------------
! Local variables:
integer, save :: maxeq, uselev, hold_neq, lev, eq, i, toplev
logical :: i_made_gen_band
!----------------------------------------------------
! Begin executable code
! set the top level of the hierarchy based on the number of equations
if (matrix%neq == matrix%neq_vert) then
toplev = matrix%nlev
elseif (matrix%neq == matrix%neq_vert+matrix%neq_edge) then
toplev = matrix%nlev+1
else
toplev = matrix%nlev+2
endif
! determine how many levels make up a small enough grid
maxeq = solver_cntl%coarse_size
uselev = 1
do
if (uselev+1 > toplev) exit
if (matrix%begin_level(uselev+2) > maxeq) exit
uselev = uselev + 1
end do
! coarsen to a small grid
do lev=toplev,uselev+1,-1
call basis_change(lev,TO_HIER,matrix)
end do
! put the Dirichlet boundary values back to their nodal values
where (matrix%equation_type == DIRICHLET) &
matrix%solution(1:) = matrix%rhs
! call lapack solver
hold_neq = matrix%neq
matrix%neq = matrix%begin_level(uselev+1)-1
i_made_gen_band = .false.
if (.not. matrix%lapack_gen_band_exists) then
call make_lapack_gen_band(uselev,matrix,matrix%lapack_mat)
matrix%lapack_gen_band_exists = .true.
i_made_gen_band = .true.
endif
call lapack_indef(uselev,matrix,matrix%lapack_mat)
matrix%neq = hold_neq
! interpolate to the original grid
do lev=uselev+1,toplev
call basis_change(lev,TO_NODAL,matrix)
end do
! perform a Gauss-Seidel iteration to get the solution out of the subspace
do eq=1,matrix%neq
if (matrix%equation_type(eq) == DIRICHLET) cycle
matrix%solution(eq) = matrix%rhs(eq) + matrix%r_mine(eq) + matrix%r_others(eq)
do i=matrix%begin_row(eq)+1,matrix%end_row(eq)
matrix%solution(eq) = matrix%solution(eq)-matrix%matrix_val(i)*matrix%solution(matrix%column_index(i))
end do
matrix%solution(eq) = matrix%solution(eq)/matrix%matrix_val(matrix%begin_row(eq))
end do
end subroutine coarse_precon
end module lapack_solve
| {"hexsha": "1134461b3f82f228c5df4c79f8fb1111e0710c96", "size": 30793, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/lapack_solve.f90", "max_stars_repo_name": "qsnake/phaml", "max_stars_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_stars_repo_licenses": ["mpich2"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-09-07T15:46:34.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-07T15:46:34.000Z", "max_issues_repo_path": "src/lapack_solve.f90", "max_issues_repo_name": "qsnake/phaml", "max_issues_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_issues_repo_licenses": ["mpich2"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/lapack_solve.f90", "max_forks_repo_name": "qsnake/phaml", "max_forks_repo_head_hexsha": "8925b4c32657bbd9f81cd5f8f9d6739151c66fec", "max_forks_repo_licenses": ["mpich2"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.2525027809, "max_line_length": 108, "alphanum_fraction": 0.649335888, "num_tokens": 8285} |
import Aoc
import Data.Fin
import Data.List
import Data.List1
import Data.Nat
import Data.Strings
import Data.Vect
%default total
Passport : Type
Passport = List (String, String)
||| Split a string at the first colon in it: "abc:def" -> ("abc", "def").
splitColon : String -> (String, String)
splitColon s = let (p, q) = break (==':') s in (p, pack $ drop 1 $ unpack q)
requiredFields : List String
requiredFields = ["byr","iyr","eyr","hgt","hcl","ecl","pid"] -- not "cid"
||| *: Are all required fields present?
isValid1 : Passport -> Bool
isValid1 passport =
all (\k => any ((==k) . fst) passport) requiredFields
||| Check if a string represents a natural number in a given range [lo, hi].
isNatInRange : Nat -> Nat -> String -> Bool
isNatInRange lo hi s =
case parsePositive s of
Nothing => False
Just n => n >= lo && n <= hi
||| Validate a passport field.
isValidField : (String, String) -> Bool
isValidField (k, v) =
case k of
"byr" => isNatInRange 1920 2002 v
"iyr" => isNatInRange 2010 2020 v
"eyr" => isNatInRange 2020 2030 v
"hgt" =>
case span isDigit v of
(n, "cm") => isNatInRange 150 193 n
(n, "in") => isNatInRange 59 76 n
_ => False
"hcl" =>
case unpack v of
('#'::xs) => length xs == 6 && all isLowerHexDigit xs
_ => False
"ecl" => Prelude.elem v ["amb","blu","brn","gry","grn","hzl","oth"]
"pid" => let xs = unpack v in length xs == 9 && all isDigit xs
"cid" => True
_ => False
||| **: Are all required fields present, and are all fields valid?
isValid2 : Passport -> Bool
isValid2 passport =
isValid1 passport && all isValidField passport
partial
main : IO ()
main = do
paras <- readParagraphs
let passports = map (map splitColon . words . unwords) paras
putStr "* "; printLn (count isValid1 passports)
putStr "** "; printLn (count isValid2 passports)
| {"hexsha": "dbcbf3764d1c2ac7bcd2deac8415a1b220ccdef6", "size": 1891, "ext": "idr", "lang": "Idris", "max_stars_repo_path": "day-4.idr", "max_stars_repo_name": "lynn/aoc-2020", "max_stars_repo_head_hexsha": "a66b9cac109f9c3b51f18f5fb2d54ebdf395bb10", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2020-12-01T13:40:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-22T13:23:21.000Z", "max_issues_repo_path": "day-4.idr", "max_issues_repo_name": "lynn/aoc-2020", "max_issues_repo_head_hexsha": "a66b9cac109f9c3b51f18f5fb2d54ebdf395bb10", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "day-4.idr", "max_forks_repo_name": "lynn/aoc-2020", "max_forks_repo_head_hexsha": "a66b9cac109f9c3b51f18f5fb2d54ebdf395bb10", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.0923076923, "max_line_length": 76, "alphanum_fraction": 0.6266525648, "num_tokens": 583} |
"""
MIT License
Copyright (c) 2017 Ragini Sharma
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import numpy as np
from numpy.linalg import inv
from numpy.linalg import pinv
class regressor(object):
"""
This is a sample class for miniproject 1.
Args:
data: Is a tuple, ``(x,y)``
``x`` is a two or one dimensional ndarray ordered such that axis 0 is independent
data and data is spread along axis 1. If the array had only one dimension, it implies
that data is 1D.
``y`` is a 1D ndarray it will be of the same length as axis 0 or x.
"""
"""
    The gradient descent calculation below is batch gradient descent, not stochastic or
    mini-batch, because the entire dataset is used in each iteration. The loss is likewise
    computed over the entire dataset in each iteration, so every iteration sees all of the
    data and the whole dataset must be read into memory.
"""
def gradient_descent_calculation(self,theta,alpha,noOfIterations):
sample_size = self.x.shape[0]
actual_value = (self.y)
count =0
lamda = 5
while(count <= noOfIterations):
predicted_value = np.dot(self.x, theta)
            costfunction = np.sum((predicted_value - actual_value) ** 2) / (2 * sample_size)
            print("costfunction", costfunction, "Iteration", count)
            gradientvalue = np.dot(self.x.T, (predicted_value - actual_value)) / sample_size
            theta = theta * (1 - (alpha * lamda) / sample_size) - alpha * gradientvalue
            count = count + 1
return theta
def __init__(self, data):
self.x, self.y = data
# Here is where your training and all the other magic should happen.
# Once trained you should have these parameters with ready.
noOfIterations = 10000
alpha = 0.005
self.x = np.concatenate((np.ones((self.x.shape[0],1)), self.x), axis = 1)
theta = np.ones((self.x.shape[1],1))
result = self.gradient_descent_calculation(theta, alpha, noOfIterations)
self.w = result[1:]
self.b = result[:1]
def get_params (self):
"""
Method that should return the model parameters.
Returns:
tuple of numpy.ndarray: (w, b).
        Notes:
            Returns the weight vector and bias learned by gradient descent.
"""
return (self.w, self.b)
def get_predictions (self, x):
"""
Method should return the outputs given unseen data
Args:
x: array similar to ``x`` in ``data``. Might be of different size.
Returns:
numpy.ndarray: ``y`` which is a 1D array of predictions of the same length as axis 0 of
``x``
        Notes:
            Predictions are computed as x.w + b using the trained parameters.
"""
# Here is where you write a code to evaluate the data and produce predictions.
result = np.dot(x, self.w) + self.b
return result
if __name__ == '__main__':
pass
| {"hexsha": "238cf3a1e3716035c486b881d96778de2d37b894", "size": 4313, "ext": "py", "lang": "Python", "max_stars_repo_path": "regressor.py", "max_stars_repo_name": "raginisharma14/Deep-Learning", "max_stars_repo_head_hexsha": "df201ff8db9f2f0f252c53f81dd4ace2a1372bd0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "regressor.py", "max_issues_repo_name": "raginisharma14/Deep-Learning", "max_issues_repo_head_hexsha": "df201ff8db9f2f0f252c53f81dd4ace2a1372bd0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "regressor.py", "max_forks_repo_name": "raginisharma14/Deep-Learning", "max_forks_repo_head_hexsha": "df201ff8db9f2f0f252c53f81dd4ace2a1372bd0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.9416666667, "max_line_length": 119, "alphanum_fraction": 0.6243913749, "include": true, "reason": "import numpy,from numpy", "num_tokens": 940} |
#! /usr/bin/env python3
import numpy as np
def readcloudsfile(fname):
'''
Format is this:
4 rows of comments
A row of nlat, nlon, nlayers
A row of lat, lon, level, alt (m), p (bar), T (K), t_std (K),
EW vel (m/s), NS vel (m/s), vert vel (m/s), clouds 1-13 vis tau (650 nm),
cloud total vis tau, cloud 1-13 IR tau (5 um), cloud total IR tau
This last row spills over onto the next line for the last 3 entries. Tragic.
'''
ncomment = 4
ncol = 38
nextra = 3
# Get species names
with open(fname, 'r') as f:
next(f)
namestring = f.readline()
spec = namestring.split(':')[1].split(',')
spec = [x.strip((' \n')) for x in spec]
with open(fname, 'r') as f:
for i in range(ncomment):
next(f)
nlat, nlon, nlayer = [int(a) for a in f.readline().split()]
data = np.zeros((nlat * nlon * nlayer, ncol))
for i, line in enumerate(f.readlines()):
if i % 2 == 0:
data[i//2,:ncol-nextra] = [float(a) for a in line.split()]
else:
data[i//2,ncol-nextra:] = [float(a) for a in line.split()]
lat_all = data[:,0]
lon_all = data[:,1]
lev_all = data[:,2]
lat = np.unique(data[:,0])
lon = np.unique(data[:,1])
lev = np.unique(data[:,2])
data2 = np.zeros((nlat, nlon, nlayer, ncol - 3))
for i in range(data.shape[0]):
tlat, tlon, tlev = data[i,:3]
ilat = np.where(tlat == lat)
ilon = np.where(tlon == lon)
ilev = np.where(tlev == lev)
data2[ilat, ilon, ilev] = data[i,3:]
return lat, lon, lev, spec, data2
def calcq(taufname, massfname, partsizefname, densfname, r0):
'''
Calculates Q used by Lee et al. 2013 from a cloud report file.
WARNING: the cloud report files have a very specific format
that is assumed by this function. Be very careful.
'''
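    # The returned Q is the quantity used in the Lee et al. (2013) extinction
    # parameterisation; below it is obtained by inverting
    # Qext = 5 / (Q * x**-4 + x**0.2) with size parameter x = 2*pi*a/lambda,
    # where lambda is taken to be 5 um.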
# Read files
# massdata is the same as taudata but the vis tau columns have been
# replaced with cloud mass in kg
lat, lon, lev, spec, taudata = readcloudsfile(taufname)
lat, lon, lev, spec, massdata = readcloudsfile(massfname)
nlat = len(lat)
nlon = len(lon)
nlev = len(lev)
# Particle size (from Michael)
a = np.loadtxt(partsizefname)
# Densities (Roman et al 2021)
rho = np.loadtxt(densfname)
# Calculate Qext
# tau = Q * pi * a**2 * n * delz
# Q = tau / (pi * a**2 * n * delz)
Rjup = 6.9911e7
ncloud = 13
V = np.zeros((nlat, nlon, nlev))
A = np.zeros((nlat, nlon, nlev))
n = np.zeros((nlat, nlon, nlev, ncloud))
Qext = np.zeros((nlat, nlon, nlev, ncloud))
Q = np.zeros((nlat, nlon, nlev, ncloud))
for ilat in range(nlat):
for ilon in range(nlon):
for ilev in range(nlev):
r = taudata[ilat,ilon,ilev,0] + r0
# These are not-so-great assumptions of cell sizes. Should ask
# Michael if he has exact numbers.
if ilev == 0:
ri = r - (taudata[ilat,ilon,ilev, 0] - \
taudata[ilat,ilon,ilev+1,0]) / 2.
rf = r + (taudata[ilat,ilon,ilev, 0] - \
taudata[ilat,ilon,ilev+1,0]) / 2.
elif ilev == nlev - 1:
ri = r
rf = r + (taudata[ilat,ilon,ilev-1,0] - \
taudata[ilat,ilon,ilev, 0]) / 2.
else:
ri = r - (taudata[ilat,ilon,ilev, 0] - \
taudata[ilat,ilon,ilev+1,0]) / 2.
rf = r + (taudata[ilat,ilon,ilev-1,0] - \
taudata[ilat,ilon,ilev, 0]) / 2.
if ilon == 0:
phii = lon[ilon] - (lon[ilon+1] - lon[ilon ]) / 2.
phif = lon[ilon] + (lon[ilon+1] - lon[ilon ]) / 2.
elif ilon == nlon - 1:
phii = lon[ilon] - (lon[ilon ] - lon[ilon-1]) / 2.
phif = lon[ilon] + (lon[ilon ] - lon[ilon-1]) / 2.
else:
phii = lon[ilon] - (lon[ilon ] - lon[ilon-1]) / 2.
phif = lon[ilon] + (lon[ilon+1] - lon[ilon ]) / 2.
if ilat == 0:
thetai = -90.
thetaf = lat[ilat] + (lat[ilat+1] - lat[ilat ]) / 2.
elif ilat == nlat - 1:
thetai = lat[ilat] - (lat[ilat ] - lat[ilat-1]) / 2.
thetaf = 90.
else:
thetai = lat[ilat] - (lat[ilat ] - lat[ilat-1]) / 2.
thetaf = lat[ilat] + (lat[ilat+1] - lat[ilat ]) / 2.
# Volume of the cell
V[ilat,ilon,ilev] = (rf**3 - ri**3) / 3 * \
(np.sin(np.deg2rad(thetaf)) - np.sin(np.deg2rad(thetai))) * \
(np.deg2rad(phif) - np.deg2rad(phii))
# Area of the (base of the) cell
A[ilat,ilon,ilev] = ri**2 * \
(np.sin(np.deg2rad(thetaf)) - np.sin(np.deg2rad(thetai))) * \
(np.deg2rad(phif) - np.deg2rad(phii))
for icloud in range(ncloud):
# I think mass is per unit area?
#n = 3. / 4. * massdata[ilat,ilon,ilev,7+icloud] / \
# (rf - ri) / (rho[icloud] * np.pi * a[ilev]**3)
n[ilat,ilon,ilev,icloud] = \
3. / 4. * massdata[ilat,ilon,ilev,7+icloud] * \
A[ilat,ilon,ilev] / \
(rho[icloud] * np.pi * a[ilev]**3. * V[ilat,ilon,ilev])
#n[ilat,ilon,ilev,icloud] = \
# 3. / 4. * massdata[ilat,ilon,ilev,7+icloud] / \
# (rho[icloud] * np.pi * a[ilev]**3)
if n[ilat,ilon,ilev,icloud] != 0.0:
Qext[ilat,ilon,ilev,icloud] = \
taudata[ilat,ilon,ilev,21+icloud] / \
(np.pi * a[ilev]**2 * \
n[ilat,ilon,ilev,icloud] * (rf - ri))
# Convert to Q, assuming 5 um
x = 2 * np.pi * a[ilev] / 5e-6
Q[ilat,ilon,ilev,icloud] = \
(5 / Qext[ilat,ilon,ilev,icloud] - x**0.2) * x**4
# This ensures that the optical depth will be
# zero, or very close to zero, in grid cells where
# the GCM believes there is no
# condensation. Otherwise, if these Qs are used in
# conjunction with a different chemistry
# prescription (e.g., equilibrium condensation),
# there may be optical depth where none is
# expected and such a forward model would give
# much different results than the GCM.
else:
Q[ilat,ilon,ilev,icloud] = 1e300
return Q, n, spec
| {"hexsha": "8e0ed6c455dc590ebd12faafa193109d5322f18c", "size": 7127, "ext": "py", "lang": "Python", "max_stars_repo_path": "theresa/scripts/calcq.py", "max_stars_repo_name": "rychallener/theresa", "max_stars_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-16T19:37:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-30T20:16:05.000Z", "max_issues_repo_path": "theresa/scripts/calcq.py", "max_issues_repo_name": "rychallener/theresa", "max_issues_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "theresa/scripts/calcq.py", "max_forks_repo_name": "rychallener/theresa", "max_forks_repo_head_hexsha": "886c6b74bee2edef7df9b6b54ce6d97de4aa4421", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.7257142857, "max_line_length": 81, "alphanum_fraction": 0.4605023151, "include": true, "reason": "import numpy", "num_tokens": 2202} |
import unittest
from pyscses.set_of_sites import SetOfSites
from pyscses.defect_species import DefectSpecies
from pyscses.defect_at_site import DefectAtSite
from pyscses.site import Site, LabelError
from unittest.mock import Mock, patch
from pyscses.constants import fundamental_charge
import numpy as np
def create_mock_defect_species(n):
labels = ['a', 'b', 'c', 'd', 'e']
valence = [-2.0, -1.0, 0.0, 1.0, 2.0]
mole_fraction = [0.15, 0.25, 0.35, 0.45, 0.55]
mobility = [0.1, 0.2, 0.3, 0.4, 0.5]
mock_defect_species = []
for i in range(n):
m = Mock(spec=DefectSpecies)
m.label = labels.pop()
m.mole_fraction = mole_fraction.pop()
m.valence = valence.pop()
m.mobility = mobility.pop()
m.fixed = False
mock_defect_species.append(m)
return mock_defect_species
def create_mock_defects_at_site(n):
labels = ['A', 'B', 'C', 'D', 'E']
valence = [-2.0, -1.0, 0.0, 1.0, 2.0]
mole_fraction = [0.15, 0.25, 0.35, 0.45, 0.55]
mobility = [0.1, 0.2, 0.3, 0.4, 0.5]
energies = [-0.1, -0.2, -0.3, -0.4, -0.5]
mock_defects_at_site = []
for i in range(n):
m = Mock(spec=DefectAtSite)
m.label = labels.pop()
m.valence = valence.pop()
m.mole_fraction = mole_fraction.pop()
m.mobility = mobility.pop()
m.energy = energies.pop()
m.fixed = False
mock_defects_at_site.append(m)
return mock_defects_at_site
class TestSiteInit(unittest.TestCase):
def test_site_is_initialised(self):
mock_defect_species = create_mock_defect_species(2)
mock_defects_at_site = create_mock_defects_at_site(2)
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
mock_DefectAtSite.side_effect = mock_defects_at_site
site = Site(label='A',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2])
self.assertEqual(site.label, 'A')
self.assertEqual(site.x, 1.5)
self.assertEqual(site.defect_species, mock_defect_species)
self.assertEqual(site.defect_energies, [-0.2, +0.2])
np.testing.assert_equal(site.scaling, np.array([1.0, 1.0]))
self.assertEqual(site.valence, 0.0)
self.assertEqual(site.saturation_parameter, 1.0)
self.assertEqual(site.fixed_defects, ())
self.assertEqual(site.mobile_defects, tuple(mock_defects_at_site))
self.assertEqual(site.alpha, 1.0)
def test_site_is_initialised_with_optional_args(self):
mock_defect_species = create_mock_defect_species(2)
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
mock_DefectAtSite.side_effect = create_mock_defects_at_site(2)
site = Site(label='B',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2],
scaling=[0.5, 0.4],
valence=-2.0,
saturation_parameter=0.1)
self.assertEqual(site.label, 'B')
self.assertEqual(site.x, 1.5)
self.assertEqual(site.defect_species, mock_defect_species)
self.assertEqual(site.defect_energies, [-0.2, +0.2])
np.testing.assert_equal(site.scaling, np.array([0.5, 0.4]))
self.assertEqual(site.valence, -2.0)
self.assertEqual(site.saturation_parameter, 0.1)
self.assertEqual(site.alpha, 0.1)
def test_site_init_with_mixed_mobile_and_fixed_defects(self):
mock_defect_species = create_mock_defect_species(3)
mock_defects_at_site = create_mock_defects_at_site(3)
mock_defects_at_site[0].fixed = False
mock_defects_at_site[0].mole_fraction = 0.4
mock_defects_at_site[1].fixed = True
mock_defects_at_site[1].mole_fraction = 0.3
mock_defects_at_site[2].fixed = True
mock_defects_at_site[2].mole_fraction = 0.2
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
mock_DefectAtSite.side_effect = mock_defects_at_site
site = Site(label='C',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2, 0.0])
self.assertEqual(site.fixed_defects, (mock_defects_at_site[1], mock_defects_at_site[2]))
self.assertEqual(site.mobile_defects[0], mock_defects_at_site[0])
self.assertEqual(site.alpha, 0.5)
def test_site_init_data_check_1(self):
"""Checks that initialising a Site object raises a ValueError if n(defect_species) != n(defect_energies)"""
mock_defect_species = create_mock_defect_species(1)
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
with self.assertRaises(ValueError):
site = Site(label='A',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2])
def test_site_init_data_check_2(self):
"""Checks that initialising a Site object raises a ValueError if n(defect_species) != n(scaling) (if passed)"""
mock_defect_species = create_mock_defect_species(2)
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
with self.assertRaises(ValueError):
site = Site(label='A',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2],
scaling=[0.5])
class TestSite(unittest.TestCase):
def setUp(self):
mock_defect_species = create_mock_defect_species(2)
mock_defects_at_site = create_mock_defects_at_site(2)
with patch('pyscses.site.DefectAtSite', autospec=True) as mock_DefectAtSite:
mock_DefectAtSite.side_effect = mock_defects_at_site
self.site = Site(label='A',
x=1.5,
defect_species=mock_defect_species,
defect_energies=[-0.2, +0.2])
def test_defect_with_label(self):
self.site.defects[0].label = 'foo'
self.site.defects[1].label = 'bar'
self.assertEqual(self.site.defect_with_label('foo'), self.site.defects[0])
self.assertEqual(self.site.defect_with_label('bar'), self.site.defects[1])
def test_defect_with_label_2(self):
"""Checks that defect_with_label() raises a LabelError if the argument does not match any of the defect labels for this site."""
self.site.defects[0].label = 'foo'
self.site.defects[1].label = 'bar'
with self.assertRaises(LabelError):
self.site.defect_with_label('banana')
def test_energies(self):
self.site.defects[0].energy = -0.2
self.site.defects[1].energy = +0.2
self.assertEqual(self.site.energies(), [-0.2, +0.2])
def test_probabilities_one(self):
self.site.defects[0].boltzmann_factor = Mock(return_value=0.1)
self.site.defects[0].mole_fraction = 0.2
self.site.defects[0].label = 'A'
self.site.defects[1].boltzmann_factor = Mock(return_value=0.1)
self.site.defects[1].mole_fraction = 0.1
self.site.defects[1].label = 'B'
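        # Expected occupation of defect i: p_i = x_i*B_i / (1 + sum_j x_j*(B_j - 1)),
        # where x_j are the mole fractions and B_j the (mocked) Boltzmann factors.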
exp_A = ((0.2*0.1/(1.0+(0.2*(0.1-1.0)+0.1*(0.1-1.0)))))
exp_B= ((0.1*0.1/(1.0+(0.2*(0.1-1.0)+0.1*(0.1-1.0)))))
self.assertEqual(self.site.probabilities(phi=1.0,
temp=298.0),
{'A': exp_A, 'B': exp_B})
def test_probabilities_two(self):
self.site.defects[0].boltzmann_factor = Mock(return_value=0.1)
self.site.defects[0].mole_fraction = 0.2
self.site.defects[0].label = 'A'
self.site.defects[0].fixed = True
self.site.alpha = 0.8
self.site.fixed_defects = (self.site.defects[0],)
self.site.defects[1].boltzmann_factor = Mock(return_value=0.1)
self.site.defects[1].mole_fraction = 0.1
self.site.defects[1].label = 'B'
self.site.mobile_defects = (self.site.defects[1],)
exp_A = 0.2
exp_B= 0.8*((0.1*0.1/(0.8+(0.1*(0.1-1.0)))))
self.assertEqual(self.site.probabilities(phi=1.0,
temp=298.0),
{'A': exp_A, 'B': exp_B})
def test_charge(self):
self.site.probabilities = Mock(return_value={'E': 0.1, 'D': 0.2})
self.site.defects[0].valence = 1.0
self.site.defects[1].valence = 2.0
self.site.scaling = 0.5
self.site.valence = 1.0
expected_value = ((1.0*0.1 + 2.0*0.2)*0.5 + 1.0) * fundamental_charge
self.assertEqual(self.site.charge(phi=1.0, temp=298.0),
expected_value)
if __name__ == '__main__':
unittest.main()
| {"hexsha": "3fde155cd46a47abea1b1dbce3dccb9f597c7774", "size": 8969, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_site.py", "max_stars_repo_name": "j-m-dean/pyscses", "max_stars_repo_head_hexsha": "6c2875cb87a8f91ae7aed382922c34b0e611ba85", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_site.py", "max_issues_repo_name": "j-m-dean/pyscses", "max_issues_repo_head_hexsha": "6c2875cb87a8f91ae7aed382922c34b0e611ba85", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_site.py", "max_forks_repo_name": "j-m-dean/pyscses", "max_forks_repo_head_hexsha": "6c2875cb87a8f91ae7aed382922c34b0e611ba85", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.845, "max_line_length": 136, "alphanum_fraction": 0.6097669751, "include": true, "reason": "import numpy", "num_tokens": 2542} |
module timestep_mod
use globals_mod
real(dp), save :: DT_ITER
contains
function calc_timestep(mesh) result(timestep)
use mesh_mod, only: mesh_t
use mesh_mod, only: mesh_iterate_cells
type(mesh_t), intent(inout) :: mesh
real(dp) :: timestep
DT_ITER = HUGE(1.0)
call mesh_iterate_cells(mesh,calc_timestep_cb)
timestep = DT_ITER
end function
subroutine calc_timestep_cb(cell)
use mesh_mod, only: cell_t
use mesh_mod, only: cell_get_delta
use equations_mod, only: xlambdaMax
use equations_mod, only: ylambdaMax
type(cell_t), intent(inout) :: cell
real(dp) :: delta(N_DIMS)
real(dp) :: xlmax(N_NODES,N_NODES)
real(dp) :: ylmax(N_NODES,N_NODES)
real(dp) :: dtmin
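        ! CFL-style local estimate: dt = min(dx/max|lambda_x|, dy/max|lambda_y|)
        ! for this cell; the global DT_ITER keeps the minimum over all cells.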
xlmax = xlambdaMax(cell%state)
ylmax = ylambdaMax(cell%state)
delta = cell_get_delta(cell)
dtmin = min(delta(1)/maxval(xlmax),delta(2)/maxval(ylmax))
!$omp critical
DT_ITER = min(DT_ITER,dtmin)
!$omp end critical
end subroutine
end module
| {"hexsha": "845d36f813783e510e03fcfd749d88672dfc674c", "size": 1009, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "source/equations/euler/timestep_mod.f90", "max_stars_repo_name": "jmark/nemo2d", "max_stars_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "source/equations/euler/timestep_mod.f90", "max_issues_repo_name": "jmark/nemo2d", "max_issues_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "source/equations/euler/timestep_mod.f90", "max_forks_repo_name": "jmark/nemo2d", "max_forks_repo_head_hexsha": "a508f192d0f6da49e485ee9c8d1c049dbb81d033", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 19.0377358491, "max_line_length": 62, "alphanum_fraction": 0.6888007929, "num_tokens": 292} |
# Copyright (c) Facebook, Inc. and its affiliates.
import numpy as np
import torch
import torch.nn as nn
from torch.nn import LayerNorm
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch.nn import TransformerDecoder, TransformerDecoderLayer
from torch.nn.init import xavier_uniform_
from fairmotion.models import decoders
class PositionalEncoding(nn.Module):
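    # Standard sinusoidal positional encoding:
    #   PE[pos, 2i]   = sin(pos / 10000^(2i/d_model))
    #   PE[pos, 2i+1] = cos(pos / 10000^(2i/d_model))
    # The encoding is added to the input and followed by dropout in forward().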
def __init__(self, d_model, dropout=0.5, max_len=5000):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(
torch.arange(0, d_model, 2).float() * (-np.log(10000.0) / d_model)
)
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer("pe", pe)
def forward(self, x):
x = x + self.pe[: x.size(0), :]
return self.dropout(x)
class TransformerLSTMModel(nn.Module):
def __init__(
self, ntoken, ninp, num_heads, hidden_dim, num_layers, dropout=0.5
):
super(TransformerLSTMModel, self).__init__()
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(
ninp, num_heads, hidden_dim, dropout
)
self.transformer_encoder = TransformerEncoder(
encoder_layers, num_layers
)
# Use Linear instead of Embedding for continuous valued input
self.encoder = nn.Linear(ntoken, ninp)
self.ninp = ninp
self.decoder = decoders.LSTMDecoder(
input_dim=ntoken, hidden_dim=hidden_dim, output_dim=ntoken,
)
self.num_layers = num_layers
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask
def init_weights(self):
initrange = 0.1
self.encoder.weight.data.uniform_(-initrange, initrange)
for name, param in self.decoder.named_parameters():
nn.init.uniform_(param.data, -0.08, 0.08)
def forward(self, src, tgt, max_len=None, teacher_forcing_ratio=None):
src = self.encoder(src) * np.sqrt(self.ninp)
src = self.pos_encoder(src)
output = self.transformer_encoder(src, mask=None)
final_encoder_state = output[:, -1].unsqueeze(0).contiguous()
output = self.decoder(
tgt,
hidden=final_encoder_state,
cell=final_encoder_state,
max_len=max_len,
teacher_forcing_ratio=teacher_forcing_ratio,
)
return output
class TransformerModel(nn.Module):
def __init__(
self, ntoken, ninp, num_heads, hidden_dim, num_layers, dropout=0.5
):
super(TransformerModel, self).__init__()
self.model_type = "Transformer"
self.src_mask = None
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layer = TransformerEncoderLayer(
ninp, num_heads, hidden_dim, dropout
)
self.transformer_encoder = TransformerEncoder(
encoder_layer=encoder_layer,
num_layers=num_layers,
norm=LayerNorm(ninp),
)
decoder_layer = TransformerDecoderLayer(
ninp, num_heads, hidden_dim, dropout
)
self.transformer_decoder = TransformerDecoder(
decoder_layer=decoder_layer,
num_layers=num_layers,
norm=LayerNorm(ninp),
)
# Use Linear instead of Embedding for continuous valued input
self.encoder = nn.Linear(ntoken, ninp)
self.project = nn.Linear(ninp, ntoken)
self.ninp = ninp
self.init_weights()
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = (
mask.float()
.masked_fill(mask == 0, float("-inf"))
.masked_fill(mask == 1, float(0.0))
)
return mask
def init_weights(self):
"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
def forward(self, src, tgt, max_len=None, teacher_forcing_ratio=None):
# Transformer expects src and tgt in format (len, batch_size, dim)
src = src.transpose(0, 1)
tgt = tgt.transpose(0, 1)
# src and tgt are now (T, B, E)
if max_len is None:
max_len = tgt.shape[0]
projected_src = self.encoder(src) * np.sqrt(self.ninp)
pos_encoded_src = self.pos_encoder(projected_src)
encoder_output = self.transformer_encoder(pos_encoded_src)
if self.training:
# Create mask for training
tgt_mask = self._generate_square_subsequent_mask(tgt.shape[0]).to(
device=tgt.device,
)
# Use last source pose as first input to decoder
tgt = torch.cat((src[-1].unsqueeze(0), tgt[:-1]))
pos_encoder_tgt = self.pos_encoder(
self.encoder(tgt) * np.sqrt(self.ninp)
)
output = self.transformer_decoder(
pos_encoder_tgt, encoder_output, tgt_mask=tgt_mask,
)
output = self.project(output)
else:
# greedy decoding
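            # At inference time the decoder is unrolled one pose at a time:
            # pose i is written into decoder_input, the whole prefix is re-decoded,
            # and output[i] becomes the input for step i+1.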
decoder_input = torch.zeros(
max_len, src.shape[1], src.shape[-1],
).type_as(src.data)
next_pose = tgt[0].clone()
# Create mask for greedy encoding across the decoded output
tgt_mask = self._generate_square_subsequent_mask(max_len).to(
device=tgt.device
)
for i in range(max_len):
decoder_input[i] = next_pose
pos_encoded_input = self.pos_encoder(
self.encoder(decoder_input) * np.sqrt(self.ninp)
)
decoder_outputs = self.transformer_decoder(
pos_encoded_input, encoder_output, tgt_mask=tgt_mask,
)
output = self.project(decoder_outputs)
next_pose = output[i].clone()
del output
output = decoder_input
return output.transpose(0, 1)
| {"hexsha": "e78504ac8549ce976b55d122784e13033af4915f", "size": 6632, "ext": "py", "lang": "Python", "max_stars_repo_path": "fairmotion/models/transformer.py", "max_stars_repo_name": "martoko/fairmotion", "max_stars_repo_head_hexsha": "13b24dc3f184d1af7874ee68e78ba74f2be83973", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "fairmotion/models/transformer.py", "max_issues_repo_name": "martoko/fairmotion", "max_issues_repo_head_hexsha": "13b24dc3f184d1af7874ee68e78ba74f2be83973", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "fairmotion/models/transformer.py", "max_forks_repo_name": "martoko/fairmotion", "max_forks_repo_head_hexsha": "13b24dc3f184d1af7874ee68e78ba74f2be83973", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2765957447, "max_line_length": 78, "alphanum_fraction": 0.5984620024, "include": true, "reason": "import numpy", "num_tokens": 1494} |
import matplotlib.pyplot as plt
import numpy as np
import matplotlib.colors as mcolors
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
from : http://stackoverflow.com/questions/16834861/create-own-colormap-using-matplotlib-and-plot-color-scale
example usage:
c = mcolors.ColorConverter().to_rgb
rvb = make_colormap(
[c('red'), c('violet'), 0.33, c('violet'), c('blue'), 0.66, c('blue')])
"""
# this might make it use black at the bottom and white at the very top, though it is transitioning at 0 and 1 so...
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
if len(seq[i - 1])==3:
r1, g1, b1 = seq[i - 1]
else:
r1, g1, b1, a1 = seq[i - 1]
if len(seq[i + 1])==3:
r2, g2, b2 = seq[i + 1]
else:
r2, g2, b2, a2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
def subset(cmap=None,clim=(0,255),step=1, bottomvalue=None):
"""docstring for subset"""
if type(cmap)==str:
cmap=plt.cm.__getattribute__(cmap)
newcmap=[]
if bottomvalue!=None:
newcmap.append(bottomvalue)
newcmap.append(0)
for i in range(clim[0],clim[1]+step,step):
newcmap.append(cmap(i))
if (i<clim[1]):
newcmap.append(float(i-clim[0])/(clim[1]-clim[0]))
return make_colormap(newcmap)
def terrain():
"""docstring for terrain"""
return subset(plt.cm.terrain,clim=(55,255),bottomvalue=(0.1,0.4,0.9))
def flatten_short_axis(data):
if data.shape[0]>data.shape[1]:
return data.mean(axis=1)
else:
return data.mean(axis=0)
def med_filter(data, filtersize):
sz=5
if type(filtersize)==int:
sz = filtersize
tmp = np.zeros(data.shape)
for i in range(len(data)):
top = min(len(data), i+sz)
bottom = max(0, i-sz)
tmp[i] = np.median(data[bottom:top,:],axis=0)
return tmp
def from_image(filename, reverse=False, startpt=0, endpt=None, median_filter=None):
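    """Build a colormap by sampling the colors along the long axis of an image file."""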
data = plt.imread(filename)
data = flatten_short_axis(data)
    if median_filter is not None:
data = med_filter(data, median_filter)
data = data[startpt:endpt]
size = data.shape[0]
if reverse:
data = data[::-1]
colors = [tuple(data[0])]
for i in range(1,size):
colors.extend([float(i)/size, tuple(data[i])])
return make_colormap(colors)
| {"hexsha": "850af61737ea1cc7f7f7d728e753ed3859e41cba", "size": 2934, "ext": "py", "lang": "Python", "max_stars_repo_path": "helpers/lib/custom_cmap.py", "max_stars_repo_name": "scrasmussen/icar", "max_stars_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 61, "max_stars_repo_stars_event_min_datetime": "2016-03-15T18:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T03:00:55.000Z", "max_issues_repo_path": "helpers/lib/custom_cmap.py", "max_issues_repo_name": "scrasmussen/icar", "max_issues_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 42, "max_issues_repo_issues_event_min_datetime": "2016-03-17T16:10:59.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-23T19:57:09.000Z", "max_forks_repo_path": "helpers/lib/custom_cmap.py", "max_forks_repo_name": "scrasmussen/icar", "max_forks_repo_head_hexsha": "88c59fed7595b176a81127993785fdeb514f28a3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 50, "max_forks_repo_forks_event_min_datetime": "2015-12-09T18:13:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-09T02:29:35.000Z", "avg_line_length": 29.9387755102, "max_line_length": 119, "alphanum_fraction": 0.5681663258, "include": true, "reason": "import numpy", "num_tokens": 851} |
import argparse
from augur.utils import write_json
import Bio.SeqIO
from collections import OrderedDict
import hdbscan
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import re
from scipy.spatial.distance import squareform, pdist
import seaborn as sns
from sklearn.decomposition import PCA
from sklearn.manifold import TSNE, MDS
import sys
from umap import UMAP
from Helpers import get_hamming_distances, get_euclidean_data_frame
if __name__ == "__main__":
parser = argparse.ArgumentParser(description = "creates embeddings", formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument("--distance-matrix", help="a csv distance matrix that can be read in by pandas, index column as row 0")
parser.add_argument("--alignment", help="an aligned FASTA file to create a distance matrix with")
parser.add_argument("--cluster-data", help="cluster data from embedding and assign labels given via HDBSCAN")
parser.add_argument("--cluster-threshold", type=float, help="cluster data from embedding and assign labels given via HDBSCAN. Pass in a threshold.")
parser.add_argument("--random-seed", default = 314159, type=int, help="an integer used as the random seed for reproducible results")
parser.add_argument("--output-node-data", help="outputting a node data JSON file")
parser.add_argument("--output-dataframe", help="outputting a csv file")
parser.add_argument("--output-figure", help="plot of the embedding, for debugging purposes")
#parser.add_argument("--method-params" help="csv file from grid search")
#if method params exists and command line exists, command line overrides it
subparsers = parser.add_subparsers(
dest="command",
required=True
)
pca = subparsers.add_parser("pca")
pca.add_argument("--components", default=10, type=int, help="the number of components for PCA")
pca.add_argument("--explained-variance", default="results/explained_variance_pca.png", help="the path for the explained variance table")
tsne = subparsers.add_parser("t-sne")
tsne.add_argument("--perplexity", default=30.0, type=float, help="the perplexity value for the tsne embedding")
tsne.add_argument("--learning-rate", default=200.0, type=float, help="the learning rate value for the tsne embedding")
umap = subparsers.add_parser("umap")
umap.add_argument("--nearest-neighbors", default=200, type=int, help="the nearest neighbors value for the umap embedding")
umap.add_argument("--min-dist", default=.5, type=float, help="the minimum distance value for the umap embedding")
mds = subparsers.add_parser("mds")
mds.add_argument("--components", default=10, type=int, help="the number of components for MDS")
args = parser.parse_args()
# Checking that the input fits the restrictions
# Setting Random seed for numpy
np.random.seed(seed=args.random_seed)
if args.output_node_data is None and args.output_dataframe is None:
print("You must specify one of the outputs", file=sys.stderr)
sys.exit(1)
if args.alignment is None and args.command == "pca":
print("You must specify an alignment for pca, not a distance matrix", file=sys.stderr)
sys.exit(1)
# getting or creating the distance matrix
if args.distance_matrix is not None:
distance_matrix = pd.read_csv(args.distance_matrix, index_col=0)
elif args.alignment is not None:
sequences_by_name = OrderedDict()
for sequence in Bio.SeqIO.parse(args.alignment, "fasta"):
sequences_by_name[sequence.id] = str(sequence.seq)
sequence_names = list(sequences_by_name.keys())
if args.command != "pca":
# Calculate Distance Matrix
hamming_distances = get_hamming_distances(
sequences_by_name.values()
)
distance_matrix = pd.DataFrame(squareform(hamming_distances))
distance_matrix.index = sequence_names
# Calculate Embedding
if args.command == "pca":
sequences_by_name = OrderedDict()
for sequence in Bio.SeqIO.parse(args.alignment, "fasta"):
sequences_by_name[sequence.id] = str(sequence.seq)
sequence_names = list(sequences_by_name.keys())
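        # Encode each aligned sequence as integers (A -> 1, G -> 2, C -> 3, T -> 4,
        # anything else -> 5) so PCA can operate on a numeric site-by-site matrix.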
numbers = list(sequences_by_name.values())[:]
for i in range(0,len(list(sequences_by_name.values()))):
numbers[i] = re.sub(r'[^AGCT]', '5', numbers[i])
numbers[i] = list(numbers[i].replace('A','1').replace('G','2').replace('C', '3').replace('T','4'))
numbers[i] = [int(j) for j in numbers[i]]
genomes_df = pd.DataFrame(numbers)
genomes_df.columns = ["Site " + str(k) for k in range(0,len(numbers[i]))]
#performing PCA on my pandas dataframe
        pca = PCA(n_components=args.components, svd_solver='full')  # n_components is taken from the command line (default 10)
principalComponents = pca.fit_transform(genomes_df)
# Create a data frame from the PCA embedding.
embedding = principalComponents
embedding_df = pd.DataFrame(principalComponents)
embedding_df.index = sequence_names
if args.command == "t-sne":
embedding_class = TSNE
embedding_parameters = {
"metric": "precomputed",
"perplexity": args.perplexity,
"learning_rate": args.learning_rate,
"random_state" : args.random_seed,
"square_distances": True,
}
elif args.command == "umap":
embedding_class = UMAP
embedding_parameters = {
"n_neighbors": args.nearest_neighbors,
"min_dist": args.min_dist,
"n_components": 2,
"init": "spectral",
"random_state" : args.random_seed
}
elif args.command == "mds":
embedding_class = MDS
embedding_parameters = {
"dissimilarity": "precomputed",
"n_components": args.components,
"n_jobs": 1,
"n_init": 2,
"random_state": args.random_seed
}
if args.command != "pca":
embedder = embedding_class(**embedding_parameters)
embedding = embedder.fit_transform(distance_matrix)
print(embedding)
# Output Embedding
# create dictionary to be "wrapped" by write_json
embedding_df = pd.DataFrame(embedding)
embedding_df.index = list(distance_matrix.index)
if args.command == "mds" or args.command == "pca":
embedding_df.columns=[args.command + str(i) for i in range(1,args.components + 1)]
else:
embedding_df.columns = [args.command.replace('-', '') + "_x" , args.command.replace('-', '') + "_y"]
if args.command == "pca":
#add explained variance as the first row of the dataframe
explained_variance = pd.DataFrame([round(pca.explained_variance_ratio_[i],4) for i in range(0,len(pca.explained_variance_ratio_))], columns=["explained variance"])
explained_variance["principal components"] = [i for i in range(1, args.components + 1)]
explained_variance.to_csv(args.explained_variance, index=False)
clusterer = None
if args.cluster_threshold is not None:
cluster = float(args.cluster_threshold)
clusterer = hdbscan.HDBSCAN(cluster_selection_epsilon=float(cluster))
elif args.cluster_data is not None:
max_df = pd.read_csv(args.cluster_data)
clusterer = hdbscan.HDBSCAN(cluster_selection_epsilon=float(max_df.where(max_df["method"] == args.command).dropna(subset = ['distance_threshold'])[["distance_threshold"]].values.tolist()[0][0]))
if clusterer is not None:
clusterer_default = hdbscan.HDBSCAN()
clusterer.fit(embedding_df)
clusterer_default.fit(embedding_df)
embedding_df[f"{args.command}_label"] = clusterer.labels_.astype(str)
embedding_df[f"{args.command}_label_default"] = clusterer_default.labels_.astype(str)
if args.output_node_data is not None:
embedding_dict = embedding_df.transpose().to_dict()
write_json({"nodes": embedding_dict}, args.output_node_data)
if args.output_dataframe is not None:
embedding_df.to_csv(args.output_dataframe, index_label="strain")
if args.output_figure:
plot_data = {
"x": embedding[:, 0],
"y": embedding[:, 1],
}
if clusterer is not None:
plot_data["cluster"] = clusterer.labels_.astype(str)
else:
plot_data["cluster"] = "0"
plot_df = pd.DataFrame(plot_data)
ax = sns.scatterplot(
data=plot_df,
x="x",
y="y",
hue="cluster",
alpha=0.5,
)
plt.savefig(args.output_figure)
plt.close()
| {"hexsha": "969f9e95f44522202f869c675b6eec427d148afe", "size": 8809, "ext": "py", "lang": "Python", "max_stars_repo_path": "notebooks/scripts/embed.py", "max_stars_repo_name": "blab/cartography", "max_stars_repo_head_hexsha": "36ef6408e7bdb73b59003166ad7725662fd8fbe6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-19T14:23:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-19T14:23:29.000Z", "max_issues_repo_path": "notebooks/scripts/embed.py", "max_issues_repo_name": "blab/cartography", "max_issues_repo_head_hexsha": "36ef6408e7bdb73b59003166ad7725662fd8fbe6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 17, "max_issues_repo_issues_event_min_datetime": "2021-07-14T17:44:02.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T18:43:46.000Z", "max_forks_repo_path": "notebooks/scripts/embed.py", "max_forks_repo_name": "blab/cartography", "max_forks_repo_head_hexsha": "36ef6408e7bdb73b59003166ad7725662fd8fbe6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-04-11T15:23:09.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-11T15:23:09.000Z", "avg_line_length": 41.1635514019, "max_line_length": 202, "alphanum_fraction": 0.6665909865, "include": true, "reason": "import numpy,from scipy", "num_tokens": 1961} |
double precision function fdisc(blockno,xc,yc)
implicit none
double precision xc,yc, xp, yp, zp
integer blockno
integer*8 cont, get_context
double precision pi, pi2
common /compi/ pi, pi2
double precision beta, theta(2)
common /annulus_comm/ beta, theta
double precision init_radius
common /initradius_comm/ init_radius
double precision x0, y0, r0, r, ravg, th
cont = get_context()
call fclaw2d_map_c2m(cont,
& blockno,xc,yc,xp,yp,zp)
ravg = (1 + beta)/2.d0
th = pi2*(0.25 + 1.d0/32.d0)
x0 = ravg*cos(th)
y0 = ravg*sin(th)
r = sqrt((xp - x0)**2 + (yp-y0)**2)
r0 = init_radius
fdisc = r - r0
end
| {"hexsha": "64c795200f6a3b6a2563b5cce0f2376cf23640c5", "size": 750, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "applications/paper/annulus/fdisc.f", "max_stars_repo_name": "scivision/forestclaw", "max_stars_repo_head_hexsha": "4dae847a8abd1055e70acda9c2973cd3dd77d3f1", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "applications/paper/annulus/fdisc.f", "max_issues_repo_name": "scivision/forestclaw", "max_issues_repo_head_hexsha": "4dae847a8abd1055e70acda9c2973cd3dd77d3f1", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "applications/paper/annulus/fdisc.f", "max_forks_repo_name": "scivision/forestclaw", "max_forks_repo_head_hexsha": "4dae847a8abd1055e70acda9c2973cd3dd77d3f1", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 20.8333333333, "max_line_length": 53, "alphanum_fraction": 0.5733333333, "num_tokens": 243} |
import pandas as pd
from ecommercetools.utilities import tools
import numpy as np
np.seterr(divide='ignore')
def get_products(transaction_items, days=None):
"""Return a Pandas DataFrame of products from a Pandas DataFrame of transaction items.
Args:
transaction_items (object): Pandas DataFrame.
        days (int, optional): Select only products sold in the last X days.
    Returns:
        products (object): Pandas DataFrame
"""
if days:
transaction_items = tools.select_last_x_days(transaction_items, 'order_date', days)
transaction_items = transaction_items.assign(line_price=transaction_items['quantity'] * transaction_items['unit_price'])
products = transaction_items.groupby('sku').agg(
first_order_date=('order_date', 'min'),
last_order_date=('order_date', 'max'),
customers=('customer_id', 'nunique'),
orders=('order_id', 'nunique'),
items=('quantity', 'sum'),
revenue=('line_price', 'sum'),
avg_unit_price=('unit_price', 'mean'),
avg_quantity=('quantity', 'mean'),
avg_revenue=('line_price', 'mean')
).reset_index()
products['avg_orders'] = round(products['orders'] / products['customers'], 2)
products['product_tenure'] = (pd.to_datetime('today') - products['first_order_date']).dt.days
products['product_recency'] = (pd.to_datetime('today') - products['last_order_date']).dt.days
return products
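# Example usage (sketch): the frame below is purely illustrative and only shows
# the columns get_products() expects (order_id, customer_id, sku, quantity,
# unit_price, order_date).
#
#   transaction_items = pd.DataFrame({
#       'order_id': [1, 1, 2],
#       'customer_id': [10, 10, 11],
#       'sku': ['A', 'B', 'A'],
#       'quantity': [1, 2, 1],
#       'unit_price': [9.99, 4.50, 9.99],
#       'order_date': pd.to_datetime(['2021-01-02', '2021-01-02', '2021-02-03']),
#   })
#   products = get_products(transaction_items)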
def get_repurchase_rate_label(df):
"""Add a label describing the repurchase rate bin.
Args:
df (object): Pandas DataFrame containing repurchase_rate.
Returns:
-------
df (object): Pandas DataFrame with repurchase_rate_label added.
"""
labels = ['Very low repurchase',
'Low repurchase',
'Moderate repurchase',
'High repurchase',
'Very high repurchase']
df['repurchase_rate_label'] = pd.cut(df['repurchase_rate'],
bins=5,
labels=labels)
return df
def get_bulk_purchase_rate_label(df):
"""Add a label describing the bulk purchase rate bin.
Args:
df (object): Pandas DataFrame containing bulk_purchase_rate.
Returns:
-------
df (object): Pandas DataFrame with bulk_purchase_rate_label added.
"""
labels = ['Very low bulk',
'Low bulk',
'Moderate bulk',
'High bulk',
'Very high bulk']
df['bulk_purchase_rate_label'] = pd.cut(df['bulk_purchase_rate'],
bins=5,
labels=labels)
return df
def get_skus_per_order_label(df):
"""Add a label describing the skus per order bin.
Args:
        df (object): Pandas DataFrame containing avg_skus_per_order_rate.
    Returns:
    -------
    df (object): Pandas DataFrame with avg_skus_per_order_label added.
"""
labels = ['Very poor basket ',
'Poor basket',
'Average basket',
'Big basket',
'Very big basket']
df['avg_skus_per_order_label'] = pd.cut(df['avg_skus_per_order_rate'],
bins=5,
labels=labels)
return df
def get_repurchase_rates(df):
"""Return repurchase rates and purchase behaviour for each SKU from transaction items data.
Given a Pandas DataFrame of transactional items, this function returns a Pandas DataFrame
containing the purchase behaviour and repurchase behaviour for each SKU.
Args:
df (object): Pandas DataFrame. Required columns: sku, order_id, customer_id, quantity, unit_price.
Returns:
-------
df (object): Pandas DataFrame.
"""
# Count the number of times each customer purchased each SKU
df['times_purchased'] = df.groupby(['sku', 'customer_id'])['order_id'].transform('count')
# Count the number of times the SKU was purchased individually within orders
df['purchased_individually'] = df[df['quantity'] == 1]. \
groupby('sku')['order_id'].transform('count')
df['purchased_individually'] = df['purchased_individually'].fillna(0)
# Count the number of times the SKU was purchased once only by customers
df['purchased_once'] = df[df['times_purchased'] == 1]. \
groupby('sku')['order_id'].transform('count')
df['purchased_once'] = df['purchased_once'].fillna(0)
# Calculate line price
df['line_price'] = df['unit_price'] * df['quantity']
# Calculate unique SKU per order
df['skus_per_order'] = df.groupby('order_id')['sku'].transform('count')
# Calculate basket turnover per order
df['basket_turnover'] = df.groupby('order_id')['line_price'].transform('sum')
# Get unique SKUs and count total items, orders, and customers
df_skus = df.groupby('sku').agg(
revenue=('line_price', 'sum'),
items=('quantity', 'sum'),
orders=('order_id', 'nunique'),
customers=('customer_id', 'nunique'),
avg_unit_price=('unit_price', 'mean'),
avg_line_price=('line_price', 'mean'),
avg_skus_per_order=('skus_per_order', 'mean'),
avg_order_value=('basket_turnover', 'mean')
)
# Calculate the average number of units per order
df_skus = df_skus.assign(avg_items_per_order=(df_skus['items'] / df_skus['orders']))
# Calculate the average number of items per customer
df_skus = df_skus.assign(avg_items_per_customer=(df_skus['items'] / df_skus['customers']))
# Merge the dataframes
df_subset = df[['sku', 'purchased_individually', 'purchased_once']].fillna(0)
df_subset.drop_duplicates('sku', keep='first', inplace=True)
df_skus = df_skus.merge(df_subset, on='sku', how='left')
# Calculate bulk purchase rates
df_skus = df_skus.assign(bulk_purchases=(df_skus['orders'] - df_skus['purchased_individually']))
df_skus = df_skus.assign(bulk_purchase_rate=(df_skus['bulk_purchases'] / df_skus['orders']))
# Calculate repurchase rates
df_skus = df_skus.assign(repurchases=(df_skus['orders'] - df_skus['purchased_once']))
df_skus = df_skus.assign(repurchase_rate=(df_skus['repurchases'] / df_skus['orders']))
df_skus = df_skus.assign(avg_skus_per_order_rate=(np.log(df_skus['avg_skus_per_order'])))
df_skus = df_skus.assign(avg_order_value_rate=(np.log(df_skus['avg_order_value'])))
# Add labels
df_skus = get_repurchase_rate_label(df_skus)
df_skus = get_bulk_purchase_rate_label(df_skus)
df_skus = get_skus_per_order_label(df_skus)
df_skus['bulk_and_repurchase_label'] = df_skus['repurchase_rate_label'].astype(str) + \
'_' + df_skus['bulk_purchase_rate_label'].astype(str)
return df_skus
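# Example usage (sketch): with an illustrative transaction_items frame such as the
# one above, get_repurchase_rates(transaction_items) returns one row per SKU with
# repurchase_rate, bulk_purchase_rate and the derived *_label columns.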
| {"hexsha": "5536942fd2fce8bece6f08faf5f52f8ada50383d", "size": 6860, "ext": "py", "lang": "Python", "max_stars_repo_path": "ecommercetools/products/products.py", "max_stars_repo_name": "ruber0id/ecommercetools", "max_stars_repo_head_hexsha": "c34582bf6ad3392fe9a3d05c92cf96dbf435d0a1", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ecommercetools/products/products.py", "max_issues_repo_name": "ruber0id/ecommercetools", "max_issues_repo_head_hexsha": "c34582bf6ad3392fe9a3d05c92cf96dbf435d0a1", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ecommercetools/products/products.py", "max_forks_repo_name": "ruber0id/ecommercetools", "max_forks_repo_head_hexsha": "c34582bf6ad3392fe9a3d05c92cf96dbf435d0a1", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.8817204301, "max_line_length": 124, "alphanum_fraction": 0.6379008746, "include": true, "reason": "import numpy", "num_tokens": 1585} |
#
# File: CNN_inferece.py
# Date:11.08.2018
# Author: Denis Tananaev
#
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.python.framework import ops
import scipy.misc
from numpy import newaxis
from PIL import Image
#import math
#import os.path
import time
import numpy as np
import CNN.model as model
import glob
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
import CNN.load_image as load_image
import numpngw
#parameters
SaveDepthRealValued=False
CHECKPOINT_DIR="./CNN_checkpoint/"
data_folder="./example/"
result_folder="./example/"
def inverse(depth):
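    # Element-wise reciprocal of the predicted map; any NaN entries produced by
    # the division are replaced with zeros.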
inverse=tf.divide(tf.ones_like(depth),depth)
inverse=tf.where(tf.is_nan(inverse),tf.zeros_like(inverse),inverse)
return inverse
def save_depth(result,config,saver,output_path,counter):
with tf.Session(config=config) as sess:
ckpt = tf.train.get_checkpoint_state(CHECKPOINT_DIR)
if ckpt and ckpt.model_checkpoint_path:
# Restores from checkpoint
saver.restore(sess, ckpt.model_checkpoint_path)
#Assuming model_checkpoint_path looks something like:
# extract global_step from it.
global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
print('Checkpoint is loaded')
else:
print('No checkpoint file found')
return
# Start the queue runners.
coord = tf.train.Coordinator()
try:
threads = []
for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):
threads.extend(qr.create_threads(sess, coord=coord, daemon=True,
start=True))
start_time = time.time()
res=sess.run([result])
duration = time.time() - start_time
print("Loading model and evaluation took (seconds): ",duration)
res=np.array(res)
print("Minimum value of depth (meters): ",np.min(res))
print("Maximum value of depth (meters): ",np.max(res))
res*=1000
if counter<10:
image_name="CNN_depth_0"+str(counter)+".png"
else:
image_name="CNN_depth_"+str(counter)+".png"
name=output_path+image_name
depth=np.array(res[0,0,:,:,0],dtype=np.uint16)
if SaveDepthRealValued==True:
numpngw.write_png(name,depth)
else:
scipy.misc.toimage(depth, cmin=0, cmax=5000).save(name)
except Exception as e: # pylint: disable=broad-except
coord.request_stop(e)
coord.request_stop()
coord.join(threads, stop_grace_period_secs=10)
def predict(input_path,output_path,image_name,counter):
#get input image
ops.reset_default_graph()
with tf.Graph().as_default() as g:
input_image=load_image.input_image(image_name)
scale1_depth, scale2_depth, depth,scale1_normal,scale2_normal = model.inference(input_image)
result = inverse(depth)
saver = tf.train.Saver()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.7)
config = tf.ConfigProto(gpu_options=gpu_options)
save_depth(result,config,saver,output_path,counter)
def main(argv=None):
images=glob.glob(data_folder+"i_*.png")
images=sorted(images)
for i in range(len(images)):
predict(data_folder,result_folder,images[i],i)
if __name__ == '__main__':
tf.app.run()
| {"hexsha": "1110f6ea6cf510ead8247784d0eb09981717ebff", "size": 3607, "ext": "py", "lang": "Python", "max_stars_repo_path": "CNN_inference.py", "max_stars_repo_name": "Dtananaev/tf_lstm_depth", "max_stars_repo_head_hexsha": "94f83e8671e8928eba24eac6936a02cd9d123686", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2018-09-30T10:00:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T12:32:48.000Z", "max_issues_repo_path": "CNN_inference.py", "max_issues_repo_name": "Dtananaev/tf_lstm_depth", "max_issues_repo_head_hexsha": "94f83e8671e8928eba24eac6936a02cd9d123686", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CNN_inference.py", "max_forks_repo_name": "Dtananaev/tf_lstm_depth", "max_forks_repo_head_hexsha": "94f83e8671e8928eba24eac6936a02cd9d123686", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-07-08T12:46:40.000Z", "max_forks_repo_forks_event_max_datetime": "2020-07-08T12:46:40.000Z", "avg_line_length": 30.8290598291, "max_line_length": 96, "alphanum_fraction": 0.6501247574, "include": true, "reason": "import numpy,from numpy,import scipy", "num_tokens": 820} |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = "Han"
__email__ = "[email protected]"
import logging
import numpy as np
import torch.nn
import torch.cuda
from game.models.layers import *
from utils.functions import compute_mask, del_zeros_right, compute_top_layer_mask
from game.tokenizer import Vocabulary
logger = logging.getLogger(__name__)
class DocRepPTTrainModel(torch.nn.Module):
"""
Documents representation model
Args:
model_config: config
embedding_path: embeddings path
Inputs:
tar_d: (batch, doc_sent_len, doc_word_len)
cand_d: (batch, cand_doc_num, doc_sent_len, doc_word_len)
Outputs:
cand_d_prop: (batch, cand_doc_num)
"""
def __init__(self, model_config, embedding_path=None, embedding_freeze=True):
super(DocRepPTTrainModel, self).__init__()
self.model_config = model_config
embedding_num = model_config['embedding_num']
embedding_dim = model_config['embedding_dim']
self.hidden_size = model_config['hidden_size']
dropout_p = model_config['dropout_p']
enable_layer_norm = model_config['layer_norm']
self.doc_hierarchical = model_config['doc_hierarchical']
if not model_config['use_glove']:
self.embedding_layer = torch.nn.Embedding(num_embeddings=embedding_num,
embedding_dim=embedding_dim,
padding_idx=Vocabulary.PAD_IDX)
else:
embedding_weight = torch.tensor(np.load(embedding_path), dtype=torch.float32)
logger.info('Embedding shape: ' + str(embedding_weight.shape))
self.embedding_layer = torch.nn.Embedding.from_pretrained(embedding_weight,
freeze=embedding_freeze,
padding_idx=Vocabulary.PAD_IDX)
self.tar_doc_encoder = DocRepPTEncoder(embedding_dim, self.hidden_size, dropout_p, enable_layer_norm)
self.cand_doc_encoder = DocRepPTEncoder(embedding_dim, self.hidden_size, dropout_p, enable_layer_norm)
# self.tar_doc_encoder = TransformerModel(nemb=embedding_dim,
# nhead=2,
# nhid=200,
# nlayers=2,
# dropout=dropout_p)
# self.cand_doc_encoder = TransformerModel(nemb=embedding_dim,
# nhead=2,
# nhid=200,
# nlayers=2,
# dropout=dropout_p)
def forward(self, tar_d, cand_ds):
tar_d, _ = del_zeros_right(tar_d)
cand_ds, _ = del_zeros_right(cand_ds)
if self.doc_hierarchical:
_, sent_right_idx = del_zeros_right(tar_d.sum(-1))
tar_d = tar_d[:, :sent_right_idx, :]
_, sent_right_idx = del_zeros_right(cand_ds.sum(-1))
cand_ds = cand_ds[:, :, :sent_right_idx, :]
# embedding layer
tar_doc_emb = self.embedding_layer(tar_d)
tar_doc_mask = compute_mask(tar_d)
cand_docs_emb = self.embedding_layer(cand_ds)
cand_docs_mask = compute_mask(cand_ds)
# target document encoder layer
tar_doc_rep, _ = self.tar_doc_encoder(tar_doc_emb, tar_doc_mask)
# candidate documents encoder layer
batch, cand_doc_num = cand_docs_emb.size(0), cand_docs_emb.size(1)
new_size = [batch * cand_doc_num] + list(cand_docs_emb.shape[2:])
cand_docs_emb_flip = cand_docs_emb.view(*new_size)
new_size = [batch * cand_doc_num] + list(cand_ds.shape[2:])
cand_docs_mask_flip = cand_docs_mask.view(*new_size)
cand_docs_rep_flip, _ = self.cand_doc_encoder(cand_docs_emb_flip, cand_docs_mask_flip)
cand_docs_rep = cand_docs_rep_flip.contiguous().view(batch, cand_doc_num, -1)
# output layer
cand_scores = torch.bmm(tar_doc_rep.unsqueeze(1),
cand_docs_rep.transpose(1, 2)).squeeze(1) # (batch, cand_doc_num)
cand_logits = torch.log_softmax(cand_scores, dim=-1)
return cand_logits
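# Shape sketch (illustrative sizes, not taken from the original code): with batch=8,
# cand_doc_num=5, doc_sent_len=12 and doc_word_len=30, tar_d is (8, 12, 30),
# cand_ds is (8, 5, 12, 30), and the returned cand_logits is (8, 5) holding
# log-probabilities over the candidate documents.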
class DocRepPTTestModel(torch.nn.Module):
"""
Documents representation out model
Args:
model_config: config
embedding_path: embeddings path
Inputs:
doc: (batch, doc_sent_len, doc_word_len)
Outputs:
document_rep: (batch, hidden_size * 4)
"""
def __init__(self, model_config, embedding_path=None, embedding_freeze=True):
super(DocRepPTTestModel, self).__init__()
self.model_config = model_config
embedding_num = model_config['embedding_num']
embedding_dim = model_config['embedding_dim']
self.hidden_size = model_config['hidden_size']
dropout_p = model_config['dropout_p']
enable_layer_norm = model_config['layer_norm']
if not model_config['use_glove']:
self.embedding_layer = torch.nn.Embedding(num_embeddings=embedding_num,
embedding_dim=embedding_dim,
padding_idx=Vocabulary.PAD_IDX)
else:
embedding_weight = torch.tensor(np.load(embedding_path), dtype=torch.float32)
logger.info('Embedding shape: ' + str(embedding_weight.shape))
self.embedding_layer = torch.nn.Embedding.from_pretrained(embedding_weight,
freeze=embedding_freeze,
padding_idx=Vocabulary.PAD_IDX)
self.tar_doc_encoder = DocRepPTEncoder(embedding_dim, self.hidden_size, dropout_p, enable_layer_norm)
self.cand_doc_encoder = DocRepPTEncoder(embedding_dim, self.hidden_size, dropout_p, enable_layer_norm)
# self.tar_doc_encoder = TransformerModel(nemb=embedding_dim,
# nhead=2,
# nhid=200,
# nlayers=2,
# dropout=dropout_p)
# self.cand_doc_encoder = TransformerModel(nemb=embedding_dim,
# nhead=2,
# nhid=200,
# nlayers=2,
# dropout=dropout_p)
def forward(self, doc):
doc, _ = del_zeros_right(doc)
_, sent_right_idx = del_zeros_right(doc.sum(-1))
doc = doc[:, :sent_right_idx, :]
# embedding layer
doc_emb = self.embedding_layer(doc)
doc_mask = compute_mask(doc)
# doc encoder layer
tar_doc_rep, _ = self.tar_doc_encoder(doc_emb, doc_mask)
cand_doc_rep, _ = self.cand_doc_encoder(doc_emb, doc_mask)
# doc representation
doc_rep = torch.cat([tar_doc_rep, cand_doc_rep], dim=-1)
return doc_rep
class DocRepPTEncoder(torch.nn.Module):
"""
Documents representation model
Inputs:
doc_emb: (batch, doc_sent_len, doc_word_len, emb_dim)
doc_mask: (batch, doc_sent_len, doc_word_len)
Outputs:
doc_rep: (batch, hidden_size * 2)
"""
def __init__(self, embedding_dim, hidden_size, dropout_p, enable_layer_norm):
super(DocRepPTEncoder, self).__init__()
self.hidden_size = hidden_size
self.dropout_layer = torch.nn.Dropout(p=dropout_p)
self.doc_word_rnn = MyRNNBase(mode='GRU',
input_size=embedding_dim,
hidden_size=self.hidden_size,
bidirectional=True,
dropout_p=dropout_p,
enable_layer_norm=enable_layer_norm,
batch_first=True,
num_layers=1)
self.doc_word_attention = SelfAttention(hidden_size=self.hidden_size * 2)
self.doc_sentence_rnn = MyRNNBase(mode='GRU',
input_size=self.hidden_size * 2,
hidden_size=self.hidden_size,
bidirectional=True,
dropout_p=dropout_p,
enable_layer_norm=enable_layer_norm,
batch_first=True,
num_layers=1)
self.doc_sentence_attention = SelfAttention(hidden_size=self.hidden_size * 2)
def forward(self, doc_emb, doc_mask):
visual_parm = {}
batch, doc_sent_len, doc_word_len, _ = doc_emb.size()
doc_word_emb = doc_emb.view(batch * doc_sent_len, doc_word_len, -1)
doc_word_mask = doc_mask.view(batch * doc_sent_len, doc_word_len)
# (batch * doc_sent_len, doc_word_len, hidden_size * 2)
doc_word_rep, _ = self.doc_word_rnn(doc_word_emb, doc_word_mask)
# (batch * doc_sent_len, hidden_size * 2)
doc_sent_emb, doc_word_att_p = self.doc_word_attention(doc_word_rep, doc_word_mask)
visual_parm['doc_word_att_p'] = doc_word_att_p
# (batch, doc_sent_len, hidden_size * 2)
doc_sent_emb = doc_sent_emb.view(batch, doc_sent_len, -1)
doc_sent_mask = compute_top_layer_mask(doc_mask)
# (batch, doc_sent_len, hidden_size * 2)
doc_sent_rep, _ = self.doc_sentence_rnn(doc_sent_emb, doc_sent_mask)
# (batch, hidden_size * 2)
doc_rep, doc_sent_att_p = self.doc_sentence_attention(doc_sent_rep, doc_sent_mask)
visual_parm['doc_sent_att_p'] = doc_sent_att_p
return doc_rep, visual_parm
| {"hexsha": "0e41183e623af0e1acc95d1600bc9caf64b61d96", "size": 10165, "ext": "py", "lang": "Python", "max_stars_repo_path": "game/models/doc_rep_pt.py", "max_stars_repo_name": "laddie132/MD3", "max_stars_repo_head_hexsha": "3df45918e33437e9a2309f7965f34f3a75621059", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2021-02-07T03:20:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-09T03:34:51.000Z", "max_issues_repo_path": "game/models/doc_rep_pt.py", "max_issues_repo_name": "laddie132/MD3", "max_issues_repo_head_hexsha": "3df45918e33437e9a2309f7965f34f3a75621059", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "game/models/doc_rep_pt.py", "max_forks_repo_name": "laddie132/MD3", "max_forks_repo_head_hexsha": "3df45918e33437e9a2309f7965f34f3a75621059", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-14T15:42:40.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-14T15:42:40.000Z", "avg_line_length": 41.1538461538, "max_line_length": 110, "alphanum_fraction": 0.5722577472, "include": true, "reason": "import numpy", "num_tokens": 2062} |
using SymPy
using LinearAlgebra  # provides cross() on Julia >= 1.0
wmotor1 = Sym("wmotor1");
wmotor2 = Sym("wmotor2");
wmotor3 = Sym("wmotor3");
wmotor4 = Sym("wmotor4");
## Propeller Forces
keng = 6.11e-3;
f1_body = [0;0;-keng*wmotor1];
f2_body = [0;0;-keng*wmotor2];
f3_body = [0;0;-keng*wmotor3];
f4_body = [0;0;-keng*wmotor4];
## Propeller Moments
kmeng = 1.5e-4;
m1_body = [0;0;-kmeng*wmotor1];
m2_body = [0;0; kmeng*wmotor2];
m3_body = [0;0;-kmeng*wmotor3];
m4_body = [0;0; kmeng*wmotor4];
## Moment arm from CG to Propeller Hub;
Larm = 0.15;
r1_arm_body = [ Larm*cosd(45); -Larm*sind(45); 0.0];
r2_arm_body = [ Larm*cosd(45); Larm*sind(45); 0.0];
r3_arm_body = [-Larm*cosd(45); Larm*sind(45); 0.0];
r4_arm_body = [-Larm*cosd(45); -Larm*sind(45); 0.0];
## Total Moment due to Motors
m1_body = m1_body + cross(r1_arm_body, f1_body);
m2_body = m2_body + cross(r2_arm_body, f2_body);
m3_body = m3_body + cross(r3_arm_body, f3_body);
m4_body = m4_body + cross(r4_arm_body, f4_body);
mtot = m1_body + m2_body + m3_body + m4_body;
ftot = f1_body + f2_body + f3_body + f4_body;
AA = jacobian([mtot;ftot[3]], [wmotor1, wmotor2, wmotor3, wmotor4]);
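# AA is the constant 4x4 matrix mapping the four motor speeds to [mx; my; mz; fz].
# Sketch of one possible use for motor mixing (assumes AA is invertible and that a
# numeric conversion succeeds; the command names below are illustrative):
#   mixing = inv(convert(Matrix{Float64}, AA));
#   wmotors = mixing * [mx_cmd; my_cmd; mz_cmd; fz_cmd];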
| {"hexsha": "6888150c592b510b3f92d351aa6157a9494a8d84", "size": 1101, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "example/QuadRotor/ComputeMixingLogic.jl", "max_stars_repo_name": "jg110/Aerospace.jl", "max_stars_repo_head_hexsha": "48998ecaabf7b052793a44205e5482301ac32822", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-11-21T21:54:46.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-21T21:54:46.000Z", "max_issues_repo_path": "example/QuadRotor/ComputeMixingLogic.jl", "max_issues_repo_name": "jg110/Aerospace.jl", "max_issues_repo_head_hexsha": "48998ecaabf7b052793a44205e5482301ac32822", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "example/QuadRotor/ComputeMixingLogic.jl", "max_forks_repo_name": "jg110/Aerospace.jl", "max_forks_repo_head_hexsha": "48998ecaabf7b052793a44205e5482301ac32822", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.2307692308, "max_line_length": 68, "alphanum_fraction": 0.6721162579, "num_tokens": 474} |
import os
import pickle as pkl
from tqdm import tqdm
import numpy as np
from scipy import stats
import matplotlib.pyplot as plt
import pandas as pd
import ujson as json
import nltk
from nltk.tokenize import word_tokenize
from time import time
np.random.seed(int(time()))
SPACE = ' '
def stat_length(seq_length):
print('Seq len info :')
seq_len = np.asarray(seq_length)
idx = np.arange(0, len(seq_len), dtype=np.int32)
print(stats.describe(seq_len))
plt.figure(figsize=(16, 9))
plt.subplot(121)
plt.plot(idx[:], seq_len[:], 'ro')
plt.grid(True)
plt.xlabel('index')
plt.ylabel('seq_len')
plt.title('Scatter Plot')
plt.subplot(122)
plt.hist(seq_len, bins=10, label=['seq_len'])
plt.grid(True)
plt.xlabel('seq_len')
plt.ylabel('freq')
plt.title('Histogram')
plt.show()
def stat_altlex(eng_sentences, sim_sentences, labels):
c_alt, nc_alt = [], []
for eng, sim, label in zip(eng_sentences, sim_sentences, labels):
if label == 0:
nc_alt.append(' '.join(w for w in eng[1]))
nc_alt.append(' '.join(w for w in sim[1]))
else:
c_alt.append(' '.join(w for w in eng[1]))
c_alt.append(' '.join(w for w in sim[1]))
c_alt_set = set(c_alt)
nc_alt_set = set(nc_alt)
co_alt_set = c_alt_set.intersection(nc_alt_set)
co_in_c, co_in_nc = 0, 0
for c, nc in zip(c_alt, nc_alt):
if c in co_alt_set:
co_in_c += 1
        if nc in co_alt_set:
co_in_nc += 1
print('#Altlexes rep casual - {}'.format(len(c_alt_set)))
print('#Altlexes rep non_casual - {}'.format(len(nc_alt_set)))
print('#Altlexes in both set - {}'.format(len(co_alt_set)))
print(co_alt_set)
print('#CoAltlex in causal - {}'.format(co_in_c))
print('#CoAltlex in non_causal - {}'.format(co_in_nc))
def seg_length(sentences):
seg_len = []
for sen in sentences:
seg_len.append((len(sen[0]), len(sen[1]), len(sen[2])))
return seg_len
def check_null(sen):
flag = False
if len(sen) == 3:
# if len(sen[0]) > 0:
# pre = sen[0]
# else:
# pre = ['<NULL>']
# flag = True
# if len(sen[1]) > 0:
# mid = sen[1]
# else:
# mid = ['<NULL>']
# flag = True
# if len(sen[2]) > 0:
# cur = sen[2]
# else:
# cur = ['<NULL>']
# flag = True
pre = sen[0] if len(sen[0]) > 0 else ['<NULL>']
mid = sen[1] if len(sen[1]) > 0 else ['<NULL>']
cur = sen[2] if len(sen[2]) > 0 else ['<NULL>']
else:
pre = sen[0] if len(sen[0]) > 0 else ['<NULL>']
mid = ['<NULL>']
cur = ['<NULL>']
flag = True
return pre, mid, cur, flag
def preprocess_train(file_path, file_name, data_type, is_build=False):
print("Generating {} examples...".format(data_type))
examples = []
engs, sims = [], []
seg_engs, seg_sims, labels = [], [], []
data_path = os.path.join(file_path, file_name)
lines = open(data_path, 'r', encoding='ISO-8859-1').readlines()
for line in lines:
line = line.strip().split('\t')
if line[0] == 'label':
continue
labels.append(int(line[0]))
del line[0]
if is_build:
engs.append(word_tokenize(SPACE.join(line[:3]).strip()))
sims.append(word_tokenize(SPACE.join(line[3:]).strip()))
seg_engs.append([word_tokenize(seg) for seg in line[:3]])
seg_sims.append([word_tokenize(seg) for seg in line[3:]])
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%',
'"', '``', '-', '\'\'']
if is_build:
eng_filtered = [[word.lower() for word in document if word not in english_punctuations] for document in engs]
sim_filtered = [[word.lower() for word in document if word not in english_punctuations] for document in sims]
seg_eng_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in eng] for eng
in seg_engs]
seg_sim_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in sim] for sim
in seg_sims]
total = 0
seq_len = []
for label, eng, sim in zip(labels, seg_eng_filtered, seg_sim_filtered):
total += 1
pre, mid, cur, flag = check_null(eng)
if flag:
print(total)
examples.append({'eid': total,
'tokens': pre + mid + cur,
'tokens_pre': pre,
'tokens_alt': mid,
'tokens_cur': cur,
'cau_label': label})
seq_len.append(len(pre + mid + cur))
total += 1
pre, mid, cur, flag = check_null(sim)
if flag:
print(total)
examples.append({'eid': total,
'tokens': pre + mid + cur,
'tokens_pre': pre,
'tokens_alt': mid,
'tokens_cur': cur,
'cau_label': label})
seq_len.append(len(pre + mid + cur))
if is_build:
sentences = []
for eng_tokens, sim_tokens in zip(eng_filtered, sim_filtered):
sentences.append(SPACE.join(eng_tokens))
sentences.append(SPACE.join(sim_tokens))
else:
sentences = []
np.random.shuffle(examples)
stat_length(seq_len)
return examples, sentences, (seg_eng_filtered, seg_sim_filtered), labels
def preprocess_test(file_path, file_name, data_type, is_build=False):
print("Generating {} examples...".format(data_type))
examples = []
sentences, segments, labels = [], [], []
data_path = os.path.join(file_path, file_name)
lines = open(data_path, 'r', encoding='ISO-8859-1').readlines()
for line in lines:
line = line.strip().split('\t')
num = int(line[-1])
del line[-1]
labels.append(0 if num == 0 else 1)
sentences.append(word_tokenize(SPACE.join(line).strip()))
if len(line) == 3:
segments.append([word_tokenize(seg) for seg in line])
else:
segments.append([['<NULL>'], word_tokenize(line[0]), word_tokenize(line[1])])
english_punctuations = [',', '.', ':', ';', '?', '(', ')', '[', ']', '&', '!', '*', '@', '#', '$', '%',
'"', '``', '-', '\'\'']
if is_build:
sen_filtered = [[word.lower() for word in sentence if word not in english_punctuations] for sentence in
sentences]
seg_filtered = [[[word.lower() for word in seg if word not in english_punctuations] for seg in eng] for eng in
segments]
total = 0
seq_len = []
for label, seg in zip(labels, seg_filtered):
total += 1
pre, mid, cur, flag = check_null(seg)
if flag:
print(total)
examples.append({'eid': total,
'tokens': pre + mid + cur,
'tokens_pre': pre,
'tokens_alt': mid,
'tokens_cur': cur,
'cau_label': label})
seq_len.append(len(pre + mid + cur))
# print('Get {} total examples'.format(total))
# print('Get {} causal examples'.format(causal))
# print('Get {} non-causal examples'.format(non_causal))
if is_build:
sentences = [SPACE.join(tokens) for tokens in sen_filtered]
else:
sentences = []
stat_length(seq_len)
return examples, sentences, seg_filtered, labels
def preprocess_transfer(file_path, file_name, data_type, is_build=False):
print("Generating {} examples...".format(data_type))
examples = []
data_path = os.path.join(file_path, file_name)
total = 0
with open(data_path, 'rb') as f:
data_set = json.load(f)
f.close()
for label, sample in zip(data_set['label'], data_set['sample']):
total += 1
pre, mid, cur, flag = check_null(sample)
if flag:
print(total)
examples.append({'eid': total,
'tokens': pre + mid + cur,
'tokens_pre': pre,
'tokens_alt': mid,
'tokens_cur': cur,
'cau_label': label})
return examples
def build_dict(data_path):
dictionary = {}
with open(data_path, 'r', encoding='utf8') as fh:
for line in fh:
line = line.strip().split(' ')
fredist = nltk.FreqDist(line)
for localkey in fredist.keys():
if localkey in dictionary.keys():
dictionary[localkey] = dictionary[localkey] + fredist[localkey]
else:
                    # the key is not in the dictionary yet
                    dictionary[localkey] = fredist[localkey]  # add the current word frequency to the dictionary
return set(dictionary)
def save(filename, obj, message=None):
if message is not None:
print('Saving {}...'.format(message))
if message == 'corpus':
with open(filename, 'w', encoding='utf8') as fh:
fh.writelines([line + '\n' for line in obj])
elif message == 'embeddings':
with open(filename, 'wb') as fh:
pkl.dump(obj, fh)
else:
with open(filename, 'w', encoding='utf8') as fh:
json.dump(obj, fh)
fh.close()
def get_embedding(data_type, corpus_dict, emb_file=None, vec_size=None):
print("Generating {} embedding...".format(data_type))
# token2id = {'<NULL>': 0, '<OOV>': 1, '<LEARN>': 2}
token2id = {'<NULL>': 0, '<OOV>': 1}
if emb_file is not None:
assert vec_size is not None
with open(emb_file, 'rb') as fin:
trained_embeddings = pkl.load(fin)
fin.close()
embedding_dict = set(trained_embeddings)
print('Num of tokens in corpus {}'.format(len(corpus_dict)))
filtered_tokens = corpus_dict.intersection(embedding_dict) # common
oov_tokens = corpus_dict.difference(filtered_tokens)
combined_tokens = []
for token in oov_tokens:
if len(token.split('-')) > 1:
combined_tokens.append(token)
combined_tokens = set(combined_tokens)
# oov_tokens = oov_tokens.difference(combined_tokens)
# token2id = {'<NULL>': 0, '<OOV>': 1}
# embedding_mat = np.zeros([len(corpus_dict) + 2, vec_size])
embedding_mat = np.zeros([len(filtered_tokens) + len(token2id), vec_size])
for token in filtered_tokens:
token2id[token] = len(token2id)
embedding_mat[token2id[token]] = trained_embeddings[token]
combined = 0
for tokens in combined_tokens:
sub_tokens = tokens.split('-')
token_vec = np.zeros([vec_size])
in_emb = 0
for t in sub_tokens:
if t in filtered_tokens:
token_vec += trained_embeddings[t]
in_emb += 1
if in_emb > 0:
combined += 1
token2id[tokens] = len(token2id)
embedding_mat = np.row_stack((embedding_mat, token_vec / in_emb))
scale = 3.0 / max(1.0, (len(corpus_dict) + vec_size) / 2.0)
embedding_mat[1] = np.random.uniform(-scale, scale, vec_size)
print('Filtered_tokens: {} Combined_tokens: {} OOV_tokens: {}'.format(len(filtered_tokens),
combined,
len(oov_tokens)))
else:
embedding_mat = np.random.uniform(-0.25, 0.25, (len(corpus_dict) + len(token2id), vec_size))
embedding_mat[0] = np.zeros(vec_size)
embedding_mat[1] = np.zeros(vec_size)
for token in corpus_dict:
token2id[token] = len(token2id)
# id2token = dict([val, key] for key, val in token2id.items())
id2token = dict(zip(token2id.values(), token2id.keys()))
# print(len(token2id), len(id2token), len(embedding_mat))
return embedding_mat, token2id, id2token
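# Example (sketch; the file names are hypothetical): building the vocabulary from
# the corpus file and loading a pickled {token: vector} dict of 300-d embeddings.
#
#   corpus_dict = build_dict('data/corpus.txt')
#   emb_mat, token2id, id2token = get_embedding('word', corpus_dict,
#                                               emb_file='data/glove.300d.pkl',
#                                               vec_size=300)
#   # emb_mat then has one row per kept token plus the <NULL> and <OOV> entries.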
def gen_embedding(data_type, corpus_dict, emb_file=None, vec_size=None):
print("Generating {} embedding...".format(data_type))
# token2id = {'<NULL>': 0, '<OOV>': 1, '<LEARN>': 2}
token2id = {'<NULL>': 0, '<OOV>': 1}
if emb_file is not None:
assert vec_size is not None
with open(emb_file, 'rb') as fin:
trained_embeddings = pkl.load(fin)
fin.close()
embedding_dict = set(trained_embeddings)
print('Num of tokens in corpus {}'.format(len(corpus_dict)))
filtered_tokens = corpus_dict.intersection(embedding_dict) # common
oov_tokens = corpus_dict.difference(filtered_tokens)
combined_tokens = []
for token in oov_tokens:
if len(token.split('-')) > 1:
combined_tokens.append(token)
combined_tokens = set(combined_tokens)
# oov_tokens = oov_tokens.difference(combined_tokens)
# token2id = {'<NULL>': 0, '<OOV>': 1}
# embedding_mat = np.zeros([len(corpus_dict) + 2, vec_size])
embedding_mat = np.zeros([len(filtered_tokens) + len(token2id), vec_size])
for token in filtered_tokens:
token2id[token] = len(token2id)
embedding_mat[token2id[token]] = trained_embeddings[token]
combined = 0
for tokens in combined_tokens:
sub_tokens = tokens.split('-')
token_vec = np.zeros([vec_size])
in_emb = 0
for t in sub_tokens:
if t in filtered_tokens:
token_vec += trained_embeddings[t]
in_emb += 1
if in_emb > 0:
combined += 1
token2id[tokens] = len(token2id)
embedding_mat = np.row_stack((embedding_mat, token_vec / in_emb))
scale = 3.0 / max(1.0, (len(corpus_dict) + vec_size) / 2.0)
embedding_mat[1] = np.random.uniform(-scale, scale, vec_size)
print('Filtered_tokens: {} Combined_tokens: {} OOV_tokens: {}'.format(len(filtered_tokens),
combined,
len(oov_tokens)))
else:
embedding_mat = np.random.uniform(-0.25, 0.25, (len(corpus_dict) + len(token2id), vec_size))
embedding_mat[0] = np.zeros(vec_size)
embedding_mat[1] = np.zeros(vec_size)
for token in corpus_dict:
token2id[token] = len(token2id)
# id2token = dict([val, key] for key, val in token2id.items())
id2token = dict(zip(token2id.values(), token2id.keys()))
# print(len(token2id), len(id2token), len(embedding_mat))
return embedding_mat, token2id, id2token
def seg_length(sentences):
seg_len = []
for sen in sentences:
seg_len.append((len(sen[0]), len(sen[1]), len(sen[2])))
return seg_len
def gen_annotation(segs, max_length, filename, labels, data_type):
max_length = max_length['full']
if data_type == 'train':
eng_length = seg_length(segs[0])
sim_length = seg_length(segs[1])
with open(filename, 'w', encoding='utf8') as f:
for el, sl, label in zip(eng_length, sim_length, labels):
pre, alt, cur = el
if sum(el) > max_length:
cur -= pre + alt + cur - max_length
annos = '0 ' * pre
                annos += ('1 ' if label == 1 else '2 ') * alt
annos += '0 ' * cur
f.write(annos.strip() + '\n')
pre, alt, cur = sl
if sum(sl) > max_length:
cur -= pre + alt + cur - max_length
annos = '0 ' * pre
                annos += ('1 ' if label == 1 else '2 ') * alt
annos += '0 ' * cur
f.write(annos.strip() + '\n')
f.close()
else:
length = seg_length(segs)
with open(filename, 'w', encoding='utf8') as f:
for l, label in zip(length, labels):
pre, alt, cur = l
if sum(l) > max_length:
cur -= pre + alt + cur - max_length
annos = '0 ' * pre
                annos += ('1 ' if label == 1 else '2 ') * alt
annos += '0 ' * cur
f.write(annos.strip() + '\n')
f.close()
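# Annotation sketch: for a segment split of (pre=2, alt=1, cur=3) tokens, the line
# written is "0 0 1 0 0 0" when label == 1 and "0 0 2 0 0 0" otherwise.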
def build_features(sentences, data_type, max_len, out_file, word2id, annotation_file=None):
print("Processing {} examples...".format(data_type))
total = 0
meta = {}
samples = []
# fh = open(annotation_file, 'r', encoding='utf8')
for sentence in tqdm(sentences):
total += 1
tokens = np.zeros([max_len['full']], dtype=np.int32)
tokens_pre = np.zeros([max_len['pre']], dtype=np.int32)
tokens_alt = np.zeros([max_len['alt']], dtype=np.int32)
tokens_cur = np.zeros([max_len['cur']], dtype=np.int32)
def _get_word(word):
for each in (word, word.lower(), word.capitalize(), word.upper()):
if each in word2id:
return word2id[each]
return 1
seq_len = min(len(sentence['tokens']), max_len['full'])
pre_len = min(len(sentence['tokens_pre']), max_len['pre'])
alt_len = min(len(sentence['tokens_alt']), max_len['alt'])
cur_len = min(len(sentence['tokens_cur']), max_len['cur'])
for i in range(seq_len):
tokens[i] = _get_word(sentence['tokens'][i])
for i in range(pre_len):
tokens_pre[i] = _get_word(sentence['tokens_pre'][i])
for i in range(alt_len):
tokens_alt[i] = _get_word(sentence['tokens_alt'][i])
for i in range(cur_len):
tokens_cur[i] = _get_word(sentence['tokens_cur'][i])
samples.append({'id': sentence['eid'],
'tokens': tokens,
'tokens_pre': tokens_pre,
'tokens_alt': tokens_alt,
'tokens_cur': tokens_cur,
'length': seq_len,
'cau_label': sentence['cau_label']})
# fh.close()
with open(out_file, 'wb') as fo:
pkl.dump(samples, fo)
fo.close()
print('Build {} instances of features in total'.format(total))
meta['total'] = total
return meta
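# Each record pickled to out_file is a dict of the form (sketch):
#   {'id': ..., 'tokens': int32 array of length max_len['full'],
#    'tokens_pre' / 'tokens_alt' / 'tokens_cur': int32 arrays of the segment maxima,
#    'length': clipped token count, 'cau_label': 0 or 1}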
def run_prepare(config):
train_examples, train_corpus, train_seg, train_labels = preprocess_train(config.raw_dir, config.train_file,
'train', config.build)
transfer_examples1 = preprocess_transfer(config.raw_dir, config.transfer_file1, 'transfer')
transfer_examples2 = preprocess_transfer(config.raw_dir, config.transfer_file2, 'transfer')
valid_examples, valid_corpus, valid_seg, valid_labels = preprocess_test(config.raw_dir, config.valid_file,
'valid', config.build)
test_examples, test_corpus, test_seg, test_labels = preprocess_test(config.raw_dir, config.test_file,
'test', config.build)
if config.build:
# types = ['train', 'valid', 'test']
# labels = [train_labels, valid_labels, test_labels]
# segs = [train_seg, valid_seg, test_seg]
# for t, s, l in zip(types, segs, labels):
# gen_annotation(s, config.max_len, os.path.join(config.processed_dir, t + '_annotations.txt'), l, t)
save(config.corpus_file, train_corpus, 'corpus')
corpus_dict = build_dict(config.corpus_file)
token_emb_mat, token2id, id2token = get_embedding('word', corpus_dict, config.w2v_file, config.n_emb)
save(config.token_emb_file, token_emb_mat, message='embeddings')
save(config.token2id_file, token2id, message='token to index')
save(config.id2token_file, id2token, message='index to token')
else:
with open(config.token2id_file, 'r') as fh:
token2id = json.load(fh)
transfer_meta1 = build_features(transfer_examples1, 'transfer', config.max_len, config.transfer_record_file1,
token2id)
save(config.transfer_meta1, transfer_meta1, message='transfer meta')
del transfer_examples1
transfer_meta2 = build_features(transfer_examples2, 'transfer', config.max_len, config.transfer_record_file2,
token2id)
save(config.transfer_meta2, transfer_meta2, message='transfer meta')
del transfer_examples2
train_meta = build_features(train_examples, 'train', config.max_len, config.train_record_file, token2id,
config.train_annotation)
save(config.train_meta, train_meta, message='train meta')
del train_examples, train_corpus
valid_meta = build_features(valid_examples, 'valid', config.max_len, config.valid_record_file, token2id)
save(config.valid_meta, valid_meta, message='valid meta')
del valid_examples, valid_corpus
test_meta = build_features(test_examples, 'test', config.max_len, config.test_record_file, token2id,
config.test_annotation)
save(config.test_meta, test_meta, message='test meta')
del test_examples, test_corpus
save(config.shape_meta, {'max_len': config.max_len}, message='shape meta')
| {"hexsha": "be8bc31ec262c7ee031c00356d8cae4a016cbe4c", "size": 21636, "ext": "py", "lang": "Python", "max_stars_repo_path": "preprocess/torch_preprocess.py", "max_stars_repo_name": "shiningliang/Multi-level-Causality-Detection-Network", "max_stars_repo_head_hexsha": "126a11735781224aaa31af37728e6dd0b2e511c2", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2019-12-06T09:11:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-09T12:02:43.000Z", "max_issues_repo_path": "preprocess/torch_preprocess.py", "max_issues_repo_name": "shiningliang/Multi-level-Causality-Detection-Network", "max_issues_repo_head_hexsha": "126a11735781224aaa31af37728e6dd0b2e511c2", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2019-12-06T09:11:34.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-25T13:44:17.000Z", "max_forks_repo_path": "preprocess/torch_preprocess.py", "max_forks_repo_name": "shiningliang/Multi-level-Causality-Detection-Network", "max_forks_repo_head_hexsha": "126a11735781224aaa31af37728e6dd0b2e511c2", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.055028463, "max_line_length": 117, "alphanum_fraction": 0.5566648179, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5238} |
#The MIT License
#
#Copyright (c) 2017 DYNI machine learning & bioacoustics team - Univ. Toulon
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of
#this software and associated documentation files (the "Software"), to deal in
#the Software without restriction, including without limitation the rights to
#use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
#the Software, and to permit persons to whom the Software is furnished to do so,
#subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
#FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
#COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
#IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
#CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os
import random
import pytest
import numpy as np
from dynibatch.utils.exceptions import ParameterError
from dynibatch.utils import segment
from dynibatch.utils import segment_container
from dynibatch.utils import feature_container
from dynibatch.utils import datasplit_utils
from dynibatch.utils import utils
from dynibatch.parsers import label_parsers
DATA_PATH = os.path.join(os.path.dirname(__file__), "data")
TEST_AUDIO_PATH_TUPLE_1 = (DATA_PATH, "dataset1/ID0132.wav")
TEST_AUDIO_PATH_TUPLE_2 = (DATA_PATH, "dataset2/ID1238.wav")
TEST_SEG_PATH_TUPLE_1 = (DATA_PATH, "dataset1/ID0132.seg")
TEST_SEG_PATH_TUPLE_2 = (DATA_PATH, "dataset1/ID0133.seg")
TEST_DURATION = 15.45
TEST_N_SEGMENTS = 4
TEST_FIRST_SEGMENT_DURATION = 0.79
SEGMENT_CONTAINER_LISTS_TO_GENERATE = 100
TEST_FILE2LABEL_PATH = os.path.join(DATA_PATH, "file2label.csv")
TEST_LABELS_PATH = os.path.join(DATA_PATH, "labels.txt")
class TestSegment:
def test_init(self):
try:
start_time = 1
end_time = 10
segment.Segment(start_time, end_time)
except ParameterError:
pytest.fail("Unexpected ParameterError")
def test_negative_start_time(self):
with pytest.raises(ParameterError):
start_time = -1
end_time = 10
segment.Segment(start_time, end_time)
def test_time_order(self):
with pytest.raises(ParameterError):
start_time = 3
end_time = 1
segment.Segment(start_time, end_time)
def test_set_segment_labels(self):
segment_from_list = []
segment_from_list.append(segment.Segment(0, 1, "a"))
segment_from_list.append(segment.Segment(1.2, 2, "b"))
segment_to_list = []
segment_to_list.append(segment.Segment(0.2, 0.3))
segment_to_list.append(segment.Segment(0.6, 1.1))
segment_to_list.append(segment.Segment(1.7, 1.8))
segment_to_list.append(segment.Segment(2.5, 2.7))
segment.set_segment_labels(segment_from_list,
segment_to_list,
overlap_ratio=0.5)
assert(segment_to_list[0].label == "a" and
segment_to_list[1].label == "a" and
segment_to_list[2].label == "b" and
segment_to_list[3].label == segment.CommonLabels.unknown.value)
def test_set_segment_labels_overlap(self):
segment_from_list = []
segment_from_list.append(segment.Segment(0, 1, "a"))
segment_to_list = []
segment_to_list.append(segment.Segment(0.4, 1.5))
segment_to_list.append(segment.Segment(0.6, 1.5))
segment.set_segment_labels(segment_from_list,
segment_to_list,
overlap_ratio=0.5)
assert(segment_to_list[0].label == "a" and
segment_to_list[1].label == segment.CommonLabels.unknown.value)
class TestSegmentContainer:
def test_no_segments_n_segments(self):
sc = segment_container.SegmentContainer("fake_audio_path")
assert sc.n_segments == 0
def test_no_segments_n_active_segments(self):
sc = segment_container.SegmentContainer("fake_audio_path")
assert sc.n_active_segments == 0
def test_no_segments_n_segments_w_label(self):
sc = segment_container.SegmentContainer("fake_audio_path")
assert sc.n_segments_with_label("fake_label") == 0
def test_no_segments_n_active_segments_w_label(self):
sc = segment_container.SegmentContainer("fake_audio_path")
assert sc.n_active_segments_with_label("fake_label") == 0
def test_no_segments_features(self):
sc = segment_container.SegmentContainer("fake_audio_path")
assert not any(sc.has_features(["fake_feature"]))
def test_n_segments(self):
sc = segment_container.SegmentContainer("fake_audio_path")
n_segments = 5
for i in range(n_segments):
sc.segments.append(segment.Segment(i * 1, (i + 1) * 1))
assert sc.n_segments == n_segments
def test_n_active_segments(self):
sc = segment_container.SegmentContainer("fake_audio_path")
n_segments = 5
active_segment_ind = [2, 3]
for i in range(n_segments):
sc.segments.append(segment.Segment(i * 1, (i + 1) * 1))
if i in active_segment_ind:
sc.segments[-1].activity = True
assert sc.n_active_segments == len(active_segment_ind)
def test_no_active_segments(self):
sc = segment_container.SegmentContainer("fake_audio_path")
n_segments = 5
for i in range(n_segments):
sc.segments.append(segment.Segment(i * 1, (i + 1) * 1))
assert sc.n_active_segments == 0
def test_n_segments_w_label(self):
sc = segment_container.SegmentContainer("fake_audio_path")
n_segments = 5
for i in range(n_segments):
sc.segments.append(segment.Segment(i * 1, (i + 1) * 1))
assert sc.n_segments_with_label(segment.CommonLabels.unknown.value) == n_segments
def test_n_active_segments_w_labels(self):
sc = segment_container.SegmentContainer("fake_audio_path")
n_segments = 5
active_segment_ind = [2, 3]
for i in range(n_segments):
sc.segments.append(segment.Segment(i * 1, (i + 1) * 1))
if i in active_segment_ind:
sc.segments[-1].activity = True
assert sc.n_active_segments_with_label(segment.CommonLabels.unknown.value) == len(active_segment_ind)
def test_create_random_segment_containers(self):
sc_ref = segment_container.create_segment_containers_from_audio_files(
DATA_PATH,
shuffle=True,
label_parser=None)
sc_generated = []
for _ in range(SEGMENT_CONTAINER_LISTS_TO_GENERATE):
sc_generated.append(segment_container.create_segment_containers_from_audio_files(
DATA_PATH,
shuffle=True,
label_parser=None))
sc_ref = list(sc_ref)
list_equals = 0
for sc_try in sc_generated:
list_equals += sc_ref == list(sc_try)
assert list_equals < SEGMENT_CONTAINER_LISTS_TO_GENERATE
def test_create_segment_container_from_audio_file_tuple(self):
with pytest.raises(TypeError):
segment_container.create_segment_container_from_audio_file(
os.path.join(*TEST_AUDIO_PATH_TUPLE_1))
def test_create_segment_container_from_audio_file_n_segment(self):
sc = segment_container.create_segment_container_from_audio_file(
TEST_AUDIO_PATH_TUPLE_1)
assert sc.n_segments == 1
def test_create_segment_container_from_audio_file_segment_duration(self):
sc = segment_container.create_segment_container_from_audio_file(
TEST_AUDIO_PATH_TUPLE_1)
assert np.abs(sc.segments[0].duration - TEST_DURATION) < 1e-03
def test_create_segment_container_from_seg_file_tuple(self):
labels = label_parsers.parse_label_file(TEST_LABELS_PATH, separator=",")
with pytest.raises(TypeError):
segment_container.create_segment_container_from_seg_file(
os.path.join(*TEST_SEG_PATH_TUPLE_1), labels, seg_file_separator=",")
def test_create_segment_container_from_seg_file_n_segment(self):
labels = label_parsers.parse_label_file(TEST_LABELS_PATH, separator=",")
sc = segment_container.create_segment_container_from_seg_file(
TEST_SEG_PATH_TUPLE_1, labels, seg_file_separator=",")
assert sc.n_segments == TEST_N_SEGMENTS
def test_create_segment_container_from_seg_file_segment_duration(self):
labels = label_parsers.parse_label_file(TEST_LABELS_PATH, separator=",")
sc = segment_container.create_segment_container_from_seg_file(
TEST_SEG_PATH_TUPLE_1, labels, seg_file_separator=",")
assert np.abs(sc.segments[0].duration - TEST_FIRST_SEGMENT_DURATION) < 1e-03
def test_create_segment_container_from_seg_file_labels(self):
labels = label_parsers.parse_label_file(TEST_LABELS_PATH, separator=",")
sc_1 = segment_container.create_segment_container_from_seg_file(
TEST_SEG_PATH_TUPLE_1, labels, seg_file_separator=",")
sc_2 = segment_container.create_segment_container_from_seg_file(
TEST_SEG_PATH_TUPLE_2, labels, seg_file_separator=",")
assert sc_1.segments[0].label == segment.CommonLabels.unknown.value
assert sc_2.segments[0].label == 3
def test_create_fixed_duration_segments_duration(self):
file_duration = 12.5
seg_duration = 0.4
seg_overlap = 0.3
segments = segment_container.create_fixed_duration_segments(file_duration,
seg_duration,
seg_overlap)
assert np.all(np.isclose(np.asarray([s.duration for s in segments]),
seg_duration))
def test_create_fixed_duration_segments_n_segments(self):
file_duration = 12.5
seg_duration = 0.4
seg_overlap = 0.3
segments = segment_container.create_fixed_duration_segments(file_duration,
seg_duration,
seg_overlap)
assert len(segments) == utils.get_n_overlapping_chunks(
file_duration,
seg_duration,
seg_overlap)
def test_create_fixed_duration_segments_from_short_audio(self):
file_duration = 2
seg_duration = 3
seg_overlap = 0.3
segments = segment_container.create_fixed_duration_segments(file_duration,
seg_duration,
seg_overlap)
assert len(segments) == 1
def test_parse_segment_file_line(self):
line = "0.12; 0.15 ; 3 "
start_time, end_time, label_id = (
segment_container._parse_segment_file_line(line, ";"))
assert (np.isclose(start_time, 0.12) and np.isclose(end_time, 0.15) and
label_id == 3)
class TestFeatureContainer:
def test_init(self):
try:
feature_container.FeatureContainer("fake_audio_path",
22050,
256,
128)
except:
pytest.fail("Unexpected Error")
def test_features_ok(self):
features = ["feat1", "feat2"]
configs = ["config1", "config2"]
fc = feature_container.FeatureContainer("fake_audio_path",
22050,
256,
128)
fc.features["feat1"]["data"] = np.random.sample(10)
fc.features["feat1"]["config"] = "config1"
fc.features["feat2"]["data"] = np.random.sample(10)
fc.features["feat2"]["config"] = "config2"
assert all(fc.has_features(list(zip(features, configs))))
def test_features_wrong_features(self):
features = ["feat1", "feat3"]
configs = ["config1", "config2"]
fc = feature_container.FeatureContainer("fake_audio_path",
22050,
256,
128)
fc.features["feat1"]["data"] = np.random.sample(10)
fc.features["feat1"]["config"] = "config1"
fc.features["feat2"]["data"] = np.random.sample(10)
fc.features["feat2"]["config"] = "config2"
assert not all(fc.has_features(list(zip(features, configs))))
def test_features_wrong_configs(self):
features = ["feat1", "feat2"]
configs = ["config1", "config3"]
fc = feature_container.FeatureContainer("fake_audio_path",
22050,
256,
128)
fc.features["feat1"]["data"] = np.random.sample(10)
fc.features["feat1"]["config"] = "config1"
fc.features["feat2"]["data"] = np.random.sample(10)
fc.features["feat2"]["config"] = "config2"
assert not all(fc.has_features(list(zip(features, configs))))
def test_features_empty_features(self):
features = ["feat1", "feat2"]
configs = ["config1", "config2"]
fc = feature_container.FeatureContainer("fake_audio_path",
22050,
256,
128)
fc.features["feat1"]["data"] = np.random.sample(10)
fc.features["feat1"]["config"] = "config1"
fc.features["feat2"]["config"] = "config2"
assert not all(fc.has_features(list(zip(features, configs))))
def test_time_to_frame_ind(self):
sample_rate = 22050
win_size = 256
hop_size = 128
fc = feature_container.FeatureContainer("fake_audio_path",
sample_rate,
win_size,
hop_size)
assert fc.time_to_frame_ind(0.015) == 2
class TestUtils:
def test_get_n_overlapping_chunks(self):
file_duration = 12.5
seg_duration = 0.4
seg_overlap = 0.3
start = 0
hop = seg_duration * (1 - seg_overlap)
n_chunks = 0
while start + seg_duration < file_duration:
start += hop
n_chunks += 1
assert utils.get_n_overlapping_chunks(file_duration,
seg_duration,
seg_overlap) == n_chunks
class TestDatasplit:
@pytest.fixture(scope="module")
def n_files(self):
return 1000
@pytest.fixture(scope="module")
def n_classes(self):
return 10
@pytest.fixture(scope="module")
def n_files_per_class(self, n_files, n_classes):
return int(n_files / n_classes)
@pytest.fixture(scope="module")
def file_list(self, n_files):
return ["f{}".format(i) for i in range(n_files)]
@pytest.fixture(scope="module")
def label_list(self, n_files, n_files_per_class):
return [int(i / n_files_per_class) for i in range(n_files)]
@pytest.fixture(scope="module")
def sc_list(self, n_files, file_list, label_list):
sc_list = []
for i in range(n_files):
sc = segment_container.SegmentContainer(file_list[i])
sc.segments.append(segment.Segment(0, 1, label_list[i]))
sc_list.append(sc)
return sc_list
def test_create_datasplit_init(self, file_list):
try:
file_set = set(file_list)
train_set = set(random.sample(file_set, 700))
validation_set = set(random.sample(file_set-train_set, 100))
test_set = set(random.sample(file_set-train_set-validation_set, 200))
datasplit_utils.create_datasplit(train_set,
validation_set,
test_set,
name="fake_datasplit")
except Exception as e:
pytest.fail("Unexpected Error: {}".format(e))
def test_create_datasplit_count(self, file_list):
file_set = set(file_list)
train_set = set(random.sample(file_set, 700))
validation_set = set(random.sample(file_set-train_set, 100))
test_set = set(random.sample(file_set-train_set-validation_set, 200))
ds = datasplit_utils.create_datasplit(train_set,
validation_set,
test_set,
name="fake_datasplit")
assert (len(ds["sets"]["train"]) == 700 and
len(ds["sets"]["validation"]) == 100 and
len(ds["sets"]["test"]) == 200)
def test_create_random_datasplit_init(self, sc_list):
try:
datasplit_utils.create_random_datasplit(sc_list,
train_ratio=0.7,
validation_ratio=0.1,
test_ratio=0.2)
except Exception as e:
pytest.fail("Unexpected Error: {}".format(e))
def test_create_random_datasplit_set_dont_sumup_to_one(self, sc_list):
with pytest.raises(ParameterError):
datasplit_utils.create_random_datasplit(sc_list,
train_ratio=0.8,
validation_ratio=0.1,
test_ratio=0.2)
def test_create_random_datasplit_count(self, sc_list):
ds = datasplit_utils.create_random_datasplit(sc_list,
train_ratio=0.7,
validation_ratio=0.1,
test_ratio=0.2)
assert (len(ds["sets"]["train"]) == 700 and
len(ds["sets"]["validation"]) == 100 and
len(ds["sets"]["test"]) == 200)
def test_datasplit_stats(self):
# TODO (jul)
pass
| {"hexsha": "1d343fd3c65745dc60ae4efdc8853a247fb02969", "size": 18957, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_utils.py", "max_stars_repo_name": "dynilib/dynibatch", "max_stars_repo_head_hexsha": "b647a50e64796d0d4558cc554060aa82427d468b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-10-08T16:17:29.000Z", "max_stars_repo_stars_event_max_datetime": "2019-10-07T11:24:41.000Z", "max_issues_repo_path": "tests/test_utils.py", "max_issues_repo_name": "dynilib/dynibatch", "max_issues_repo_head_hexsha": "b647a50e64796d0d4558cc554060aa82427d468b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_utils.py", "max_forks_repo_name": "dynilib/dynibatch", "max_forks_repo_head_hexsha": "b647a50e64796d0d4558cc554060aa82427d468b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.6959459459, "max_line_length": 109, "alphanum_fraction": 0.5995146911, "include": true, "reason": "import numpy", "num_tokens": 3911} |
"""Side profiles of common trees in their bare (leafless) form."""
import matplotlib.pyplot as plt
import numpy as np
# ---------- Tree parameters ------------
# Overall profile dimensions parameters:
height = 100.0
width = 100.0
# Angles:
initial_angle = 0.5 * np.pi
angle_between_segments = 0.05 * np.pi
# Trunk and branch length:
initial_length = 400.0 # length of trunk
length_scaling = 0.6
# Trunk and branch thickness:
initial_thickness = 0.1
# ---------------------------------------
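# Rough size check (sketch): recursion stops once a segment is shorter than 1, i.e.
# after about log(initial_length) / log(1 / length_scaling)
# = log(400) / log(1 / 0.6) ~= 12 levels, so on the order of 2**12 branch tips.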
def canopy_fractal_tree(x, y, length, theta):
"""Recursive formation of a canopy fractal tree-like profile."""
if length >= 1.0:
x_new = x + length * np.cos(theta)
y_new = y + length * np.sin(theta)
thickness = (
initial_thickness * ((x_new - x) ** 2 + (y_new - y) ** 2) ** 0.5
)
plt.plot(
(x, x_new),
(y, y_new),
color="black",
linewidth=thickness,
solid_capstyle="round",
)
new_length = length * length_scaling
canopy_fractal_tree(
x_new, y_new, new_length, theta + angle_between_segments
)
canopy_fractal_tree(
x_new, y_new, new_length, theta - angle_between_segments
)
def plot_single_tree_profile(index, param_label_name, param_label_value):
"""Plots a single tree profile which is shown and also saved."""
plt.axes().set_aspect(1)
plt.axis("off")
canopy_fractal_tree(width, height, initial_length, initial_angle)
plt.title(
"Tree {}: parameter {} is '{}'".format(
index, param_label_name, param_label_value
)
)
plt.savefig("example-profiles/basic-canopy-fractal-{}.png".format(index))
plt.show()
# Plot some representative examples:
for value in range(5):
angle_between_segments
plot_single_tree_profile(
str(value + 1), "angle_between_segments", angle_between_segments
)
angle_between_segments *= 1.5
| {"hexsha": "590195e366c79cefb5df2d33bac09c14d54fa8cd", "size": 1988, "ext": "py", "lang": "Python", "max_stars_repo_path": "tree-profiles/tree_profiles.py", "max_stars_repo_name": "sadielbartholomew/creative-matplotlib", "max_stars_repo_head_hexsha": "b881025b63e9fcfdeea8e41a8f212a2c83c64a24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 39, "max_stars_repo_stars_event_min_datetime": "2020-06-28T12:33:57.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-10T09:17:33.000Z", "max_issues_repo_path": "tree-profiles/tree_profiles.py", "max_issues_repo_name": "sadielbartholomew/creative-matplotlib", "max_issues_repo_head_hexsha": "b881025b63e9fcfdeea8e41a8f212a2c83c64a24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2021-08-16T09:05:41.000Z", "max_issues_repo_issues_event_max_datetime": "2021-08-23T07:53:08.000Z", "max_forks_repo_path": "tree-profiles/tree_profiles.py", "max_forks_repo_name": "sadielbartholomew/creative-matplotlib", "max_forks_repo_head_hexsha": "b881025b63e9fcfdeea8e41a8f212a2c83c64a24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-06-28T12:35:02.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-16T08:10:49.000Z", "avg_line_length": 28.4, "max_line_length": 77, "alphanum_fraction": 0.6161971831, "include": true, "reason": "import numpy", "num_tokens": 480} |
__precompile__(false)
module Buildkite
import HTTP
import JSON
export BuildkiteAPI
abstract type AbstractBuildkiteAPI end
Base.@kwdef struct BuildkiteAPI <: AbstractBuildkiteAPI
base_url::HTTP.URI = HTTP.URI("https://api.buildkite.com/v2/")
access_token::String = ""
end
function buildkite_api_uri(api::BuildkiteAPI, path)
HTTP.URIs.merge(api.base_url, path = api.base_url.path * path)
end
function buildkite_request(api::BuildkiteAPI, request_method, endpoint;
handle_error = true,
headers = Dict(),
params = Dict(),
allowredirects::Bool = true,
idle_timeout = 20,
status_exception = false)
api_endpoint = buildkite_api_uri(api, endpoint)
request_headers = convert(Dict{String, String}, headers)
if !haskey(request_headers, "Authorization")
request_headers["Authorization"] = "Bearer $(api.access_token)"
end
if !haskey(request_headers, "User-Agent")
request_headers["User-Agent"] = "Buildkite-jl"
end
if request_method == HTTP.get
api_uri = HTTP.URIs.merge(api_endpoint, query = params)
println("DEBUG: ", api_uri)
r = request_method(api_uri, request_headers,
redirect = allowredirects, status_exception = false, idle_timeout=idle_timeout)
else
api_uri = string(api_uri)
r = request_method(api_uri, request_headers, JSON.json(params),
redirect=allowredirects,
status_exception=status_exception,
idle_timeout=idle_timeout)
end
if handle_error
#handle_response_error(r)
end
return r
end
# REST primitives
function buildkite_get(api::BuildkiteAPI, endpoint = ""; options...)
buildkite_request(api, HTTP.get, endpoint; options...)
end
function buildkite_post(api::BuildkiteAPI, endpoint = ""; options...)
buildkite_request(api, HTTP.post, endpoint; options...)
end
function buildkite_put(api::BuildkiteAPI, endpoint = ""; options...)
buildkite_request(api, HTTP.put, endpoint; options...)
end
function buildkite_delete(api::BuildkiteAPI, endpoint = ""; options...)
buildkite_request(api, HTTP.delete, endpoint; options...)
end
function buildkite_patch(api::BuildkiteAPI, endpoint = ""; options...)
buildkite_request(api, HTTP.patch, endpoint; options...)
end
function buildkite_get_json(api::BuildkiteAPI, endpoint = ""; options...)
JSON.parse(HTTP.payload(buildkite_get(api, endpoint; options...), String))
end
function buildkite_post_json(api::BuildkiteAPI, endpoint = ""; options...)
JSON.parse(HTTP.payload(buildkite_post(api, endpoint; options...), String))
end
function buildkite_put_json(api::BuildkiteAPI, endpoint = ""; options...)
JSON.parse(HTTP.payload(buildkite_put(api, endpoint; options...), String))
end
function buildkite_delete_json(api::BuildkiteAPI, endpoint = ""; options...)
JSON.parse(HTTP.payload(buildkite_delete(api, endpoint; options...), String))
end
function buildkite_patch_json(api::BuildkiteAPI, endpoint = ""; options...)
JSON.parse(HTTP.payload(buildkite_patch(api, endpoint; options...), String))
end
function hello_world()
base_url = HTTP.URI("https://api.buildkite.com")
r = buildkite_get_json(BuildkiteAPI(base_url=base_url), "")
return r["response"]
end
# organization api
struct Organization
api::BuildkiteAPI
data::Dict
end
function organization(api::BuildkiteAPI, name)
return Organization(api, buildkite_get_json(api, "organizations/$(lowercase(name))"))
end
# pipelines api
struct Pipeline
api::BuildkiteAPI
data::Dict
end
function pipelines(api::BuildkiteAPI, organization; page=0, pagination=false)
query_params = Dict("page" => page)
endpoint = "organizations/$(lowercase(organization))/pipelines"
return [Pipeline(api, p) for p in buildkite_get_json(api, endpoint; params = query_params)]
end
function pipelines(org::Buildkite.Organization)
return pipelines(org.api, org.data["name"])
end
# build api
struct Build
api::BuildkiteAPI
data::Dict
end
function builds(api::BuildkiteAPI; state=nothing)
query_params = Dict("state" => state)
endpoint = "builds"
return [Build(api, b) for b in buildkite_get_json(api, endpoint; params = query_params)]
end
build_state(b::Build) = b.data["state"]
end
| {"hexsha": "9e5b2bbe850dcb784be668e59d9b08ba1c1f3bbf", "size": 4480, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Buildkite.jl", "max_stars_repo_name": "jakebolewski/Buildkite.jl", "max_stars_repo_head_hexsha": "ea5870badc29f5605e48084823c41982b07701bf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/Buildkite.jl", "max_issues_repo_name": "jakebolewski/Buildkite.jl", "max_issues_repo_head_hexsha": "ea5870badc29f5605e48084823c41982b07701bf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/Buildkite.jl", "max_forks_repo_name": "jakebolewski/Buildkite.jl", "max_forks_repo_head_hexsha": "ea5870badc29f5605e48084823c41982b07701bf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6849315068, "max_line_length": 106, "alphanum_fraction": 0.6830357143, "num_tokens": 1062} |
[STATEMENT]
lemma (in mut_m) handshake_invL_eq_imp:
"eq_imp (\<lambda>(_::unit) s. (AT s (mutator m), s\<down> (mutator m), sys_hs_type s\<down>, sys_hs_pending m s\<down>, mem_store_buffers (s\<down> sys) (mutator m)))
handshake_invL"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. eq_imp (\<lambda>_ s. (AT s (mutator m), s\<down> (mutator m), sys_hs_type s\<down>, sys_hs_pending m s\<down>, sys_mem_store_buffers (mutator m) s\<down>)) handshake_invL
[PROOF STEP]
unfolding eq_imp_def handshake_invL_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<forall>s s'. (\<forall>x. (AT s (mutator m), s\<down> (mutator m), sys_hs_type s\<down>, sys_hs_pending m s\<down>, sys_mem_store_buffers (mutator m) s\<down>) = (AT s' (mutator m), s'\<down> (mutator m), sys_hs_type s'\<down>, sys_hs_pending m s'\<down>, sys_mem_store_buffers (mutator m) s'\<down>)) \<longrightarrow> ((atS (mutator m) hs_noop_locs s \<longrightarrow> sys_hs_type s\<down> = ht_NOOP) \<and> (atS (mutator m) hs_get_roots_locs s \<longrightarrow> sys_hs_type s\<down> = ht_GetRoots) \<and> (atS (mutator m) hs_get_work_locs s \<longrightarrow> sys_hs_type s\<down> = ht_GetWork) \<and> (atS (mutator m) ht_loaded_locs s \<longrightarrow> mut_hs_pending s\<down> \<longrightarrow> mut_hs_type s\<down> = sys_hs_type s\<down>) \<and> (atS (mutator m) hs_pending_loaded_locs s \<longrightarrow> mut_hs_pending s\<down> \<longrightarrow> sys_hs_pending m s\<down>) \<and> (atS (mutator m) hs_pending_locs s \<longrightarrow> mut_hs_pending s\<down>) \<and> (atS (mutator m) no_pending_mutations_locs s \<longrightarrow> filter (\<lambda>s. is_mw_Mutate s \<or> is_mw_Mutate_Payload s) (sys_mem_store_buffers (mutator m) s\<down>) = [])) = ((atS (mutator m) hs_noop_locs s' \<longrightarrow> sys_hs_type s'\<down> = ht_NOOP) \<and> (atS (mutator m) hs_get_roots_locs s' \<longrightarrow> sys_hs_type s'\<down> = ht_GetRoots) \<and> (atS (mutator m) hs_get_work_locs s' \<longrightarrow> sys_hs_type s'\<down> = ht_GetWork) \<and> (atS (mutator m) ht_loaded_locs s' \<longrightarrow> mut_hs_pending s'\<down> \<longrightarrow> mut_hs_type s'\<down> = sys_hs_type s'\<down>) \<and> (atS (mutator m) hs_pending_loaded_locs s' \<longrightarrow> mut_hs_pending s'\<down> \<longrightarrow> sys_hs_pending m s'\<down>) \<and> (atS (mutator m) hs_pending_locs s' \<longrightarrow> mut_hs_pending s'\<down>) \<and> (atS (mutator m) no_pending_mutations_locs s' \<longrightarrow> filter (\<lambda>s. is_mw_Mutate s \<or> is_mw_Mutate_Payload s) (sys_mem_store_buffers (mutator m) s'\<down>) = []))
[PROOF STEP]
by simp | {"llama_tokens": 1065, "file": "ConcurrentGC_Phases", "length": 2} |
""" Dependências """
import numpy as np
import pandas as pd
import multiprocessing
from tqdm import tqdm
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from scipy import sparse
import operator
from .similarities import euclidean, cosine, jaccard, manhattan, imed, hamming
from .preprocess import Preprocessing
from .neurons import *
import random
import sys
import os
""" Classe Couple """
class Couple():
"""
.. py:class::
This class allows the coupling phenomena between two or more oscillators
.. py:function:: __init__(self,*args,**kwargs)
:param data: The membrane potential values of the N neurons at time t
:type data: numpy.ndarray
"""
def __init__(self,*args,**kwargs):
super().__init__()
self.data = kwargs.get('data')
def synapse(self, connections):
"""
.. py::function:
This function creates the synapse (coupling) between the neurons
:param connections: A matrix of the coupling force between the neurons
:type connections: numpy.ndarray
"""
self.data_matrix = np.zeros(self.data.size) + self.data[:,np.newaxis]
self.difference_data = self.data_matrix - self.data_matrix.T
self.making_connections = self.difference_data*connections
return np.mean(self.making_connections,axis=1)
""" Classe Modelo """
class Neuron(Couple):
"""
.. py:class::
This class represents the biological neuron model chosen, the amounts in the system and it's dynamics.
.. py::function: __init__(self,*args,**kwargs)
:param name: the neuron model name to be used.
:type name: str
:param model: the neuron model chosen
:type model: NeuronType
:param variables: the neuron model variables
:type variables: list
:param dynamic: the dynamic function of the neuron model.
:type dynamic: function
:param max_force: the maximum coupling force value. Default is None, depends on the model.
:type max_force: int or float
:param min_force: the minimum coupling force value. Default is None, depends on the model.
:type min_force: int or float
"""
def __init__(self, *args, **kwargs):
self.name = kwargs.get('name')
self.model = kwargs.get('model')
self.variables = kwargs.get('variables')
self.dynamic = kwargs.get('dynamic')
self.max_force = None
self.min_force = None
def choose_model(self, neurons):
"""
.. py::function:
This function chooses the model according to the model name given.
:param neurons: number of neurons in the system (samples)
:type neurons: int
"""
if self.name == 'Hodgkin-Huxley':
self.model = {
'HH':HodgkinHuxley(115,-12,86,120,36,.3,1),
'Chemical':Chemical(0.1,-70),
'SDE':SDE(0.02,0.5,1.0,115,-12,86,120,36,.3,1)
}
v = np.random.uniform(0.0,4.0,(neurons))
m = self.model['HH'].m0(v)[2]
n = self.model['HH'].n0(v)[2]
h = self.model['HH'].h0(v)[2]
y = self.model['Chemical'].y0(v)[2]
I = np.zeros((neurons))+20
self.variables = [v,m,n,h,y,I]
self.dynamic = self.hh_dynamic
self.max_force = 1.0
self.min_force = 0.2
elif self.name == 'Hindmarsh-Rose':
self.model = HindmarshRose(1,3.0, 1.0,5.0,0.001,1.6,-1.6,1)
x = np.random.uniform(-.5,.5,(neurons))
y = np.random.uniform(-.5,.5,(neurons))
z = np.random.uniform(-.5,.5,(neurons))
sigma = np.zeros(shape=(neurons))+2.0
self.variables = [x,y,z,sigma]
self.dynamic = self.hr_dynamic
self.max_force = 1.92
self.min_force = 0.076
elif self.name == 'Integrate-and-Fire':
self.model = IntegrateAndFire(0,1.0,10,1.0)
v = np.random.uniform(0,0.5,size=neurons)
I = np.zeros(shape=(neurons))+2.5
sigma = np.zeros(shape=(neurons))+0.2
self.variables = [v,I,sigma]
self.dynamic = self.iaf_dynamic
self.max_force = 1.2
self.min_force = 0.36
elif self.name == 'GLM':
self.model = GLM(
neurons = neurons,
spiketrain = np.ones(shape=(1,neurons)),
response = 'delay',
refractory = 'threshold',
current = 'linear',
threshold = -55
)
self.model.adjacency = np.ones(shape=(neurons,neurons))
np.fill_diagonal(self.model.adjacency,0)
self.model.weights = np.random.uniform(0,0.5,size=(neurons,neurons))
self.model.step = 0.2
self.model.window = 250
self.model.time = 500
delta, tau = .1,.1
delay_response_args = [delta, tau]
""" Refractory """
amplitude, rest_pot, tau_2, refrac, hyper = 1,-70,0.1,0.01,100
refractory_args = [amplitude, rest_pot, tau_2, refrac, hyper]
self.model.theta_amp = 100
self.model.delay = 0.3
self.model.fire_beta = 0.05
self.model.fire_tau = 10
self.model.fire = 0.9
i_ext = np.random.uniform(0,0.5,size=(neurons))
w0 = np.random.uniform(0,1,size=(neurons))
self.model.external_current(I_ext, w0)
current_args = [i_ext,w0]
self.variables = [delay_response_args, refractory_args, current_args]
self.dynamic = self.glm_dynamic
self.min_force = 0.468
self.max_force = 0.72
elif self.name == 'Rulkov':
self.model = Rulkov(4.2,0.001,-1.2)
x = np.random.uniform(-1,-1.2,size=neurons)
y = np.zeros((neurons)) -2.9
current = np.zeros((neurons))
self.variables = [x,y,current]
self.dynamic = self.rk_dynamic
self.min_force = 0.01
self.max_force = 0.22
elif self.name == 'Izhikevic':
self.model = Izhikevic(0.02,0.2,-55,0.93)
v = np.random.uniform(-65,-64,size=neurons)
u = 0.2*v
I = np.zeros((neurons))+10
self.variables = [v,u,I]
self.dynamic = self.iz_dynamic
self.min_force = 0.9
self.max_force = 1.5
elif self.name == 'CNV':
self.model = CNV(0.002, 0.4,0.3,0.3,0.1,0.01)
x = np.random.uniform(0,0.01,size=(neurons))
y = np.zeros((neurons))
j = np.zeros((neurons)) + 0.1123
self.variables = [x,y,j]
self.dynamic = self.cnv_dynamic
self.min_force = 0.298
self.max_force = 1.0
def hh_dynamic(self,step,connections, *args):
"""
.. py::function:
This function computes the Hodgkin-Huxley Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
v,m,n,h,y,current = args[0][0], args[0][1], args[0][2], args[0][3], args[0][4], args[0][5]
self.data = v
coupling = self.synapse(connections)
next_v = v + self.model['SDE'].membrane_potential(v,m,n,h,current) - self.model['Chemical'].synapse(y,v)*step - coupling
next_m = m + self.model['SDE'].stochastic_sodium_activate(m,v)
next_n = n + self.model['SDE'].stochastic_potassium_activate(n,v)
next_h = h + self.model['SDE'].stochastic_sodium_deactivate(h,v)
next_y = y + self.model['SDE'].stochastic_chemical_transmitter(y,v)
return next_v,next_m,next_n,next_h,next_y,current
def hr_dynamic(self,step,connections,*args):
"""
.. py::function:
This function computes the Hindmarsh-Rose Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
x,y,z,sigma = args[0][0], args[0][1], args[0][2], args[0][3]
self.data = x
coupling = self.synapse(self.connections)
next_x = x + self.model.potential(x,y,z)*step + sigma*x*np.random.uniform(0,step,size=x.size) - coupling
next_y = y + self.fast_ion_channel(x,y)*step + sigma*x*np.random.uniform(0,step,size=x.size)
next_z = z + self.slow_ion_channel(x,z)*step + sigma*x*np.random.uniform(0,step,size=x.size)
return next_x, next_y, next_z, sigma
def iaf_dynamic(self, step,connections, *args):
"""
.. py::function:
This function computes the Integrate-and-Fire Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
v, I, sigma = args[0][0], args[0][1], args[0][2]
self.data = v
coupling = self.synapse(connections)
next_v = v + self.model.lif(v,I)*step + sigma*v*np.random.uniform(0,0.2,size=(v.size)) - coupling
v = self.model.reset(v,next_v)
return v, I, sigma
def glm_dynamic(self,*args):
"""
.. py::function:
This function computes the Generalized Linear Model Dynamic.
:param *args: list of variables of the biological neuron model
:type *args: list
"""
response_args, refractory_args, current_args = args[0], args[1], args[2]
response_args.insert(0,self.model.last_spike)
refractory_args.insert(0,self.model.last_spike)
self.model.incoming_spikes(response_args)
self.model.membrane_potential(refractory_args)
self.model.generate_spikes()
self.model.update()
self.model.external_current(self.model.I,current_args[1], initializer=np.random.uniform(0,0.5))
def rk_dynamic(self,step,connections, *args):
"""
.. py::function:
This function computes the Rulkov Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
x,y,current = args[0][0], args[0][1], args[0][2]
self.data = x
coupling = self.synapse(self.connections)
x = self.model.fx(x,y,current) - coupling
y = self.model.fy(x,y)
return x,y,current
def iz_dynamic(self,step, connections, *args):
"""
.. py::function:
This function computes the Izhikevic Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
v,u,current = args[0][0], args[0][1], args[0][2]
self.data = v
coupling = self.synapse(connections)
next_v = v + self.model.potential(v,u,current)*step - coupling
next_u = u + self.model.recovery(v,u)*step
new = self.model.update(next_v,next_u)
v,u = new[0],new[1]
return v, u, current
def cnv_dynamic(self,step,connections, *args):
"""
.. py::function:
This function computes the Courbage-Nekorkin-Vdovin Dynamic.
:param step: time step.
:type step: int or float
:param connections: the coupling force square matrix
:type connections: numpy.ndarray
:param *args: list of variables of the biological neuron model
:type *args: list
"""
x,y,j = args[0][0], args[0][1], args[0][2]
self.data = x
coupling = self.synapse(connections)
x = self.model.potential(x,y) - coupling
y = self.model.recovery(x,y,j)
return x,y,j
"""
Classe Similaridade
função Inicializador
Atributos:
Escolhido
"""
class Similarity():
"""
.. py::class:
This class involkes all the similarities kernels
"""
def __init__(self):
super().__init__()
self.chosen = {
'Euclidean': euclidean,
'Hamming': hamming,
'Manhattan':manhattan,
'Jaccard':jaccard,
'Cosine':cosine,
'IMED': imed
}
"""
Classe NSC
função Inicializador
função Calcula_Similaridade
função Calcula_Peso
função Inicializador de Pesos
função Estabelece_Parâmetros
função Obtém_Parâmetros
função Busca_Conexões
função Encontrar_Múltiplas_Incidências
função Atualizar_Conexões
função Atualizar_Modelo
função Fit
"""
class NSC():
"""
.. py::class:
This class implements the Neuron Synchronization Competition algorithm.
The semi-supervised method generates synchronized neurons based on the labeled data, and unsync for the
unlabeled, then the method uses similarities between the samples to create coupling connections, and those couplings
can be reinforced by the amount of connections inside a group, and punished by others. At the end, the labeled data are those
that are synchronized and segmented in different times.
.. py::function: __init__(self,*args,**kwargs)
:param preprocessing: Instance of the Preprocessing class.
:type preprocessing: Preprocessing
:param similarity: Instance of the Similarity class
:type similarity: Similarity
:param neuron: Instance of the Neuron class
:type neuron: Neuron
:param alpha ($\alpha$): Decay parameter of the exponential term in the connection creation equation.
:type alpha: int or float
:param beta ($\beta$): Decay parameter of the punishment exponential term
:type beta: into or float
:param capacity: The vector of connections'number that each neuron can have
:type capacity: numpy.ndarray
:param categorical: The categorical data of the dataset,
:type categorical: numpy.ndarray
:param classified: The classified sample at time t. Default is None.
:type classified: numpy.ndarray
:param classified_number: The number of classified data at each iteration t
:type classified_number: int
:param confusion_matrix: The confusion matrix related to the classification of the NSC algorithm
:type confusion_matrix: sklearn.metrics.confusion_matrix
:param data: A DataFrame object of the dataset used for the implementation of the NSC algorithm
:type data: pandas.core.frame.DataFrame
:param degree_out: A matrix Neurons x Labels + 1 in which each element represents the out degree coming from label j to neuron i,
and the last column is the total incoming in unlabeled neuron i.
:type degree_out: numpy.ndarray
:param distance_name: The similarity name used to calculate the coupling force between the samples
:type distance_name: str
:param disputed: A vector that stores the indexes of the disputed neurons
:type disputed: numpy.ndarray
:param found: A dictionary that stores the indexes of the neurons found in the search phase for each label
:type found: dict
:param gamma: The decay parameter of the exponential term at the reinforce term.
:type gamma: int or float
:param incident_degree: A matrix Neurons x Labels, where each element represents the amount of connections coming from the groups
different of j into the neuron i.
:type incident_degree: numpy.ndarray
:param indexes_of: A dictionary that stores the indexes of the labeled and unlabeled data.
:type indexes-of: dict
:param inner_degree: A dictionary that stores the amont of connections per label.
:type inner_degree: dict
:param labels_array: A matrix that stores the label that are passed from neuron i to neuron j
:type labels_array: numpy.ndarray
:param labels_list: The list of labels
:type labels_list: list
:param max_std: Maximum attribute standard deviation
:type max_std: into or float
:param neighbors: Maximum amount of connections that a neuron can make
:type neighbors: int
:param neuron.name: The name of the biological neuron model used.
:type neuron.name: str
:param neurons: The amount of neurons (samples)
:type neurons: int
:param numerical: The numerical dtype data in dataset
:type numerical: numpy.ndarray
:param print_steps: Boolean value that allows to print the steps of each phase in NSC algorithm
:type print_steps: bool
:param print_classification: Boolean value that allows to print the classification at each iteration
:type print_classification: bool
:param prints: A list that stores all the prints
:type prints: list
:param time_step: The time step for the neuron model
:type time_step: int or float
:param target: The column name that represents the target
:type target: str
:param threshold: The hypersphere radius
:type threshold: int or float
:param w_max: Maximum value of the coupling force
:type w_min: int or float
:param w_min: Minimum value of the coupling force
:type w_min: int or float
:param w_step: Coupling force step at reinforcement and punishment
:type w_step: int or float
:param weights: A matrix that stores all the coupling force between neurons i and j
:type weights: numpy.ndarray
:param X: Attributes data
:type X: numpy.ndarray
:param Y: Output data
:type Y: numpy.ndarray
:param y_predicted: Predicted output data
:type y_predicted: numpy.ndarray
"""
def __init__(self,*args,**kwargs):
""" Instantiation """
self.preprocessing = Preprocessing()
self.similarity = Similarity()
self.neuron = Neuron()
""" Attributes """
self.alpha = kwargs.get('alpha')
self.beta = kwargs.get('beta')
self.capacity = kwargs.get('capacity')
self.categorical = kwargs.get('categorical')
self.classified = None
self.classified_number = 0
self.confusion_matrix = None
self.data = kwargs.get('data')
self.degree_out = kwargs.get('degree_out')
self.distance_name = kwargs.get('similarity')
self.disputed = np.array([])
self.found = {}
self.gamma = kwargs.get('gamma')
self.incident_degree = kwargs.get('incident')
self.indexes_of = {}
self.inner_degree = {}
self.labels_array = kwargs.get('labels_array')
self.labels_list = kwargs.get('labels')
self.max_std = kwargs.get('max_std')
self.neighbors = kwargs.get('neighbors')
self.neuron.name = kwargs.get('model')
self.neurons = kwargs.get('neurons')
self.numerical = kwargs.get('numerical')
self.print_steps = kwargs.get('print_steps')
self.print_classification =kwargs.get('print_info')
self.prints = []
self.search_expand = kwargs.get('expand')
self.time_step = kwargs.get('time_step')
self.target= kwargs.get('target')
self.threshold = kwargs.get('threshold')
self.w_max = kwargs.get('w_max')
self.w_min = kwargs.get('w_min')
self.w_step = kwargs.get('w_step')
self.weights = kwargs.get('weights')
self.X = kwargs.get('X')
self.Y = kwargs.get('Y')
self.y_predicted = kwargs.get('y_predict')
""" Code """
if isinstance(self.distance_name,str):
self.distance = self.similarity.chosen[self.distance_name]
if (not isinstance(self.data,type(None))) and (isinstance(self.neuron.name,str)):
self.neurons = self.data.shape[0]
self.neuron.choose_model(self.neurons)
self.w_max = self.neuron.max_force
self.w_min = self.neuron.min_force
self.y_predicted = -np.ones(shape=(self.neurons))
self.degree_out = np.zeros(shape=(self.data.shape[0], np.unique(self.data['target']).size+1))
self.capacity = np.zeros(shape=(self.neurons))
if isinstance(self.labels_array,type(None)):
self.labels_array = - np.ones(shape=(self.neurons,self.neurons))
if isinstance(self.incident_degree,type(None)):
self.incident_degree = np.zeros(shape=(self.data.shape[0],np.unique(self.data['target']).size))
def calculate_similarity(self, x, y, axis=1):
"""
.. py::function:
This function calculates the similarity between the samples x and y
:param x: Sample(s) x
:type x: numpy.ndarray
:param y: Sample(s) y
:type y: numpy.ndarray
:param axis: axis along theoperation will be realized
:type axis: int
"""
return self.distance(x,y,axis=axis)
def create_connection(self,distance):
"""
.. py::function:
This function creates the connection between neurons based on their distances values
:param distance: A matrix of distances between neurons i and j
:type distance: numpy.ndarray
"""
return (self.w_min + .5*(self.w_max-self.w_min))*np.exp(-self.alpha*distance)
def initialize_weights(self, data_dtype):
"""
..py::function:
This function initializes the weights, the inner degree and the labels array
of the initial labeled data.
:param data_dtype: Attributes data of a specific data type
:type data_dtype: numpy.ndarray
"""
self.weights = np.zeros(shape=(self.data.shape[0],self.data.shape[0]))
for label in self.labels_list:
labeled = self.indexes_of['label'][label]
row_grid, col_grid = np.meshgrid(labeled, labeled.T)
rows_total = row_grid.flatten()
cols_total = col_grid.flatten()
inds = np.where(rows_total==cols_total)[0]
rows = np.delete(rows_total,inds)
columns = np.delete(cols_total,inds)
distances = self.calculate_similarity(
data_dtype[labeled,None,:], data_dtype[labeled,:], axis=2
)
distances = distances.flatten()
distances= np.delete(distances,inds)
self.weights[rows,columns] = self.create_connection(distances)
self.inner_degree[label] = int(len(self.indexes_of['label'][label])*(len(self.indexes_of['label'][label])-1))
self.labels_array[rows,columns] = label
self.labels_array[columns,rows] = label
def set_parameters(self, **kwargs):
"""
..py::function:
This function set the parameters (class attributes)
"""
for key, value in kwargs.items:
setattr(self,key,value)
self.__init__()
def get_parameters(self):
"""
.. py::function:
This function get the parameters (class attributes)
"""
print(self.__dict__)
def connection_search(self, unlabeled, data_dtype):
"""
.. py::function:
This function searchs for new connections
:param unlabeled: unlabeled data point index
:type unlabeled: int
:param data_dtype: Attributes data of a specific data type
:type data_dtype: numpy.ndarray
"""
self.prints.append("\nSearch Phase ... \n")
self.classified = None
self.found = {label:np.array([]) for label in self.labels_list}
for label in self.labels_list:
similarities = self.calculate_similarity(
data_dtype[self.indexes_of['label'][label],:],
data_dtype[self.indexes_of['unlabel'][unlabeled],None,:]
)
self.neurons_found = np.where(similarities <= self.threshold)[0]
if len(self.neurons_found) > 0:
found_indexes = self.indexes_of['label'][label][self.neurons_found]
below_capacity_indexes = found_indexes[np.where(self.capacity[found_indexes]<self.neighbors)[0]]
self.neurons_below_capacity = below_capacity_indexes
if self.neurons_below_capacity.size > 0:
self.classified = self.indexes_of['unlabel'][unlabeled]
self.weights[self.indexes_of['label'][label][self.neurons_found], self.indexes_of['unlabel'][unlabeled]] = self.create_connection(similarities[self.neurons_found])
self.labels_array[self.indexes_of['label'][label][self.neurons_found], self.indexes_of['unlabel'][unlabeled]] = label
self.degree_out[self.indexes_of['unlabel'][unlabeled],label] = self.neurons_found.size
self.degree_out[self.indexes_of['unlabel'][unlabeled],-1] += self.neurons_found.size
self.found[label] = self.indexes_of['label'][label][self.neurons_found]
self.inner_degree[label] += self.neurons_found.size
self.capacity[self.neurons_below_capacity] += 1
if isinstance(self.classified,type(None)):
self.prints.append("Nothing found!\n")
else:
self.prints.append("Found neuron {}\n".format(self.indexes_of['unlabel'][unlabeled]))
def find_multiple_incidence(self,unlabeled):
"""
.. py::function:
This function searchs if there are multiple incidence in the unlabeled data sample.
:param unlabeled: unlabeled data point index
:type unlabeled: int
"""
unlabeled = self.indexes_of['unlabel'][unlabeled]
self.incoming_labels = self.labels_array[:,unlabeled]
self.incoming = len(np.where(self.labels_array[:,unlabeled]!=-1)[0])
self.uniques = np.unique(self.incoming_labels)
self.size = len(self.uniques)
if self.size > 2:
self.prints.append("There are multiple incidence in {}\n".format(unlabeled))
if unlabeled not in self.disputed:
self.disputed = np.append(self.disputed,unlabeled)
degree_out = self.degree_out[unlabeled,:]
for label in self.labels_list:
if degree_out[label] != 0:
self.incident_degree[unlabeled,label-1] = np.sum(degree_out[np.where(np.array(self.labels_list)!=label)])
else:
self.prints.append("There are no multiple incidence!\n")
for label in self.labels_list:
if self.found[label].size > 0:
self.indexes_of['label'][label] = np.setdiff1d(
np.unique(np.where(self.labels_array==label)[1]),self.disputed
)
self.weights[unlabeled,self.found[label]] = self.weights[self.found[label],unlabeled]
self.labels_array[unlabeled, self.found[label]] = label
def cut_connections(self):
"""
.. py::function:
This function cuts the connections if there are coupling forces (weights) below the minimum coupling force (w_min)
"""
self.prints.append("Cutting Conections Phase ... \n")
self.rows_cuted, self.cols_cuted = np.where((self.weights!=0)&(self.weights<self.w_min))
self.disputed_cols, index, count = np.unique(self.cols_cuted,return_index=True, return_counts=True)
if self.disputed_cols.size > 0:
self.prints.append("There are cuts\n")
for i in range(self.disputed_cols.size):
self.disconnected = self.rows_cuted[np.where(self.disputed_cols[i]==self.cols_cuted)]
self.indexes = {}
self.weights_means = {}
self.indexes_size = np.zeros((len(self.labels_list)))
for label in self.labels_list:
indexes = np.intersect1d(self.disconnected, np.where(self.labels_array[:,self.disputed_cols[i]]==label)[0])
self.indexes[label] = indexes
self.indexes_size[label-1] = indexes.size
if indexes.size > 0:
disputed = np.repeat(self.disputed_cols[i], indexes.size)
self.weights_means[label] = np.mean(self.weights[indexes,disputed])
else:
self.weights_means[label] = 0
if self.disconnected.size == self.degree_out[self.disputed_cols[i],-1]:
self.prints.append("All connections have been severed\n")
label, max_weight = max(self.weights_means.items(), key=operator.itemgetter(1))
intersec = np.setdiff1d(self.disconnected, self.indexes[label])
self.capacity[intersec] -= 1
self.weights[intersec,np.repeat(self.disputed_cols[i],intersec.size)] = 0
self.weights[self.indexes[label],np.repeat(self.disputed_cols[i],self.indexes[label].size)] = self.w_min
self.labels_array[intersec, np.repeat(self.disputed_cols[i],intersec.size)] = -1
self.degree_out[self.disputed_cols[i],np.where(np.array(self.labels_list).astype(int)!=label)] = 0
self.degree_out[self.disputed_cols[i],-1] = self.degree_out[self.disputed_cols[i],label]
self.incident_degree[self.disputed_cols[i],:] = 0
for key,value in self.indexes.items():
if key!=label and value.size>0:
self.inner_degree[key] -= value.size
self.indexes_of['label'][label] = np.append(self.indexes_of['label'][label],self.disputed_cols[i])
self.disputed = np.setdiff1d(self.disputed, self.disputed_cols[i])
elif self.disconnected.size < self.degree_out[self.disputed_cols[i],-1]:
self.prints.append("Only a few connections have been severed\n")
for label in self.labels_list:
if self.indexes[label].size > 0:
self.weights[self.indexes[label],np.repeat(self.disputed_cols[i],self.indexes[label].size)] = 0
self.capacity[self.indexes[label]] -= 1
self.labels_array[self.indexes[label], np.repeat(self.disputed_cols[i], self.indexes[label].size)] = -1
self.inner_degree[label] -= self.indexes[label].size
incident =0
if self.indexes_size[label-1] < self.degree_out[self.disputed_cols[i],label-1]:
for l in self.labels_list:
if l != label:
incident+=self.indexes[l].size
elif self.indexes_size[label-1]==self.degree_out[self.disputed_cols[i],label-1]:
incident = self.incident_degree[self.disputed_cols[i],label-1]
self.incident_degree[self.disputed_cols[i],label-1]-=incident
self.degree_out[self.disputed_cols[i],label]-=self.indexes[label].size
self.degree_out[self.disputed_cols[i],-1]-=self.indexes[label].size
unique_labels = np.unique(self.labels_array[:,self.disputed_cols[i]])
if unique_labels.size==2:
label = int(unique_labels[1])
self.indexes_of['label'][label] = np.append(self.indexes_of['label'][label],self.disputed_cols[i])
self.disputed = np.setdiff1d(self.disputed,self.disputed_cols[i])
self.incident_degree[self.disputed_cols[i],:] = 0
else:
self.prints.append("There are no cuts!")
def update_weights(self):
"""
.. py::function:
This function updates the weights values with the reinforce and punishment terms and
then cut the connections by calling the cut_connections() method.
"""
self.prints.append("Update Weights Phase...\n")
for label in self.labels_list:
self.row, self.col = np.where(self.labels_array==label)
self.reinforce_exp = self.inner_degree[label]*np.exp(-self.gamma*self.incident_degree[self.col,label-1])
self.punish_exp = self.incident_degree[self.col,label-1]*self.beta
self.weights[self.row,self.col] = self.weights[self.row,self.col] + self.w_step*(1 - np.exp(-self.reinforce_exp)) \
- self.w_step*(1 - np.exp(-self.punish_exp))
self.weights[self.row,self.col] = np.where(self.weights[self.row,self.col]>self.w_max,self.w_max,self.weights[self.row,self.col])
self.cut_connections()
def preprocess_data(self, shuffle = True, split = True, set_null = True, not_null = None, get_indexes = True, standarlize=True):
"""
.. py::function:
This function preprocess the dataset.
:param shuffle: Boolean value that determines if will shuffle or not the data. Default is True.
:type shuffle: bool
:param split: Boolean value that determines if will split the data into features, numerical data, categorical data and output.
Default is True.
:type split: bool
:param set_null: Boolean value that determines if will set some output values to Null.
:type set_null: bool
:param not_null: Int, None or Dict values which determines how to choose the not null values. If int, then the program randomly
chooses this number of labeled data for each label, if None the program chooses randomly 10% of the data size,
if dict, for each label (key dict) the program choose the indexes determined in the dict (values dict).
:type not_null: int, None or dict
:param get_indexes: Boolean value that determines if will get or not the labeled and unlabeled data indexes
:type get_indexes: bool
:param standarlize: Boolean value that determines if will standarlize the dataset
:type standarlize: bool
"""
self.labels_list = self.preprocessing.get_labels(self.data, self.target)
if shuffle == True:
self.data = self.data.sample(frac=1).reset_index(drop=True)
if split == True:
self.X, self.categorical, self.numerical, self.Y = self.preprocessing.split_data(self.data, self.target)
if set_null == True:
if isinstance(not_null, int):
self.preprocessing.set_null_values(self.data, self.target, self.labels_list,label_size=not_null)
elif isinstance(not_null, dict):
self.preprocessing.set_null_values(self.data, self.target, label_dict=not_null)
elif isinstance(not_nul,type(None)):
self.preprocessing.set_null_values(self.data, self.target, self.labels_list,label_size=int(0.1*self.data.shape[0]))
if standarlize == True:
if split == True:
scaler = StandardScaler()
self.X = scaler.fit_transform(self.X)
self.numerical = scaler.fit_transform(self.numerical)
elif split == False:
features = self.data.drop(columns=[self.target],axis=1)
numeric = features.select_dtypes(include=np.number)
numeric_names = numeric.columns
self.data.loc[:,numeric_names] = (numeric-numeric.mean())/numeric.std()
if get_indexes == True:
self.indexes_of['unlabel'], self.indexes_of['label'] = self.preprocessing.get_label_unlabel_inds(self.data, self.target, self.labels_list)
self.indexes_of['unlabel'] = np.array(self.indexes_of['unlabel'])
print("\n-------------------The data has been preprocessed --------------------\n")
def fit(self, epochs, data_dtype):
"""
.. py::function:
This function fits the NSC algorithm at the dataset.
:param epochs: Number of time, preferably a number higher than the number of neurons.
:type epochs: int
:param data_dtype: Attributes data of a specific data type
:type data_dtype: numpy.ndarray
"""
self.initialize_weights(data_dtype)
self.neuron.potential = np.zeros(shape=(epochs,len(self.neuron.variables),self.neurons))
i = 0
j = 0
num = pd.DataFrame(data_dtype)
if self.distance_name=='Euclidean':
self.max_std = 0
for label in self.labels_list:
self.classified_number = self.classified_number + self.indexes_of['label'][label].size
std = num.loc[self.indexes_of['label'][label],:].std().max()
if std > self.max_std:
self.max_std = std
self.threshold = 6*self.max_std/self.search_expand
while i < epochs or self.classified_number < self.neurons:
self.prints = []
self.classified_number = 0
if j >= self.indexes_of['unlabel'].size:
self.prints.append("It has reached the end of the Unlabeled array, returning to index 0 \n")
j = 0
if self.indexes_of['unlabel'].size>0:
self.connection_search(j,data_dtype) # OKAY
self.find_multiple_incidence(j)
self.update_weights()
#self.neuron.variables = self.neuron.dynamic(self.time_step, self.weights, self.neuron.variables)
#self.neuron.potential[i] = np.array([var for var in list(self.neuron.variables)])
if not isinstance(self.classified,type(None)):
self.indexes_of['unlabel'] = np.setdiff1d(self.indexes_of['unlabel'],self.classified)
else:
j = j+1
i = i+1
if i == epochs:
break
if self.indexes_of['unlabel'].size !=0:
self.threshold = self.threshold + 6*self.max_std/self.search_expand
for label in self.labels_list:
self.classified_number = self.classified_number + self.indexes_of['label'][label].size
if self.print_steps == True:
for k in range(len(self.prints)):
print(self.prints[k])
if self.print_classification == True:
print("\nIteration: ", i)
print("Disputed Neurons: ", self.disputed)
print("Classified: ", self.classified_number)
print("Unlabeled: ", self.indexes_of['unlabel'].size)
for label in self.labels_list:
self.y_predicted[self.indexes_of['label'][label]] = label
self.confusion_matrix= confusion_matrix(self.Y, self.y_predicted)
# elif self.distance_name=='Hamming':
# self.max_std = self.neurons*0.5/6
# self.threshold = 1/self.neurons
# while i < epochs or self.classified_number < self.neurons:
# self.prints = []
# self.classified_number = 0
# if j >= self.indexes_of['unlabel'].size:
# self.prints.append("It has reached the end of the Unlabeled array, returning to index 0 \n")
# j = 0
# if self.indexes_of['unlabel'].size>0:
# self.connection_search(j,data_dtype) # OKAY
# self.find_multiple_incidence(j)
# self.update_weights()
# #print('Max: ', self.weights.max(), ', Min: ', self.weights.min())
# # self.neuron.variables = self.neuron.dynamic(self.time_step, self.weights, self.neuron.variables)
# # self.neuron.potential[i] = np.array([var for var in list(self.neuron.variables)])
# if not isinstance(self.classified,type(None)):
# self.indexes_of['unlabel'] = np.setdiff1d(self.indexes_of['unlabel'],self.classified)
# else:
# j = j+1
# i = i+1
# if i == epochs:
# break
# if self.indexes_of['unlabel'].size !=0:
# self.threshold = self.threshold + 6*self.max_std/self.search_expand
# for label in self.labels_list:
# self.classified_number = self.classified_number + self.indexes_of['label'][label].size
# if self.classified_number == self.neurons:
# break
# # std = num.loc[self.indexes_of['label'][label],:].std().max()
# # if std > self.max_std:
# # self.max_std = std
# # if self.print_steps == True:
# # for k in range(len(self.prints)):
# # print(self.prints[k])
# # if self.print_classification == True:
# # print("\nIteration: ", i)
# # print("Disputed Neurons: ", self.disputed)
# # print("Classified: ", self.classified_number)
# # print("Unlabeled: ", self.indexes_of['unlabel'].size)
# for label in self.labels_list:
# self.y_predicted[self.indexes_of['label'][label]] = label
# self.confusion_matrix= confusion_matrix(self.Y, self.y_predicted)
def predict(self, input_array, data_dtype):
"""
.. py::function:
This function predicts the labels of the input_array
:param input_array: The feature data to predict their classes.
:type input_array: numpy.ndarray
:param data_dtype: Attributes data of a specific data type
:type data_dtype: numpy.ndarray
"""
data_dtype = data_dtype.copy()
data_dtype = np.vstack((data_dtype,input_array))
prediction = -np.ones(input_array.shape[0])
labeled_by = self.indexes_of['label']
weights = np.zeros(shape=(self.neurons+input_array.shape[0],self.neurons+input_array.shape[0]),dtype=self.weights.dtype)
weights[:self.weights.shape[0],:self.weights.shape[1]] = self.weights
labels_array = - np.ones(shape=(self.neurons+input_array.shape[0],self.neurons+input_array.shape[0]),dtype=self.labels_array.dtype)
labels_array[:self.labels_array.shape[0],:self.labels_array.shape[1]] = self.labels_array
inner_degree = self.inner_degree
incident_degree = np.vstack((self.incident_degree,np.zeros(shape=(input_array.shape[0],self.incident_degree.shape[1]), dtype=self.incident_degree.dtype)))
degree_out = np.vstack((self.degree_out,np.zeros(shape=(input_array.shape[0], self.degree_out.shape[1]),dtype=self.degree_out.dtype)))
shift = input_array.shape[0]
disputed = np.array([])
for i in range(len(input_array)):
print(i)
""" SEARCH AND CONNECTIONS """
found = {label:np.array([]) for label in self.labels_list}
for label in self.labels_list:
similarities = self.calculate_similarity(
data_dtype[labeled_by[label],:],
input_array[i,None,:]
)
neurons_found = np.where(similarities<= self.threshold)[0]
if len(neurons_found) > 0:
found_indexes = labeled_by[label][neurons_found]
classified = input_array[i]
weights[labeled_by[label][neurons_found], i+self.neurons] = self.create_connection(similarities[neurons_found])
labels_array[labeled_by[label][neurons_found],i+self.neurons] = label
degree_out[i+self.neurons,label] = neurons_found.size
degree_out[i+self.neurons,-1] += neurons_found.size
found[label] = labeled_by[label][neurons_found]
inner_degree[label] += neurons_found.size
""" FIND MULTIPLE INCIDENCE """
incoming_labels = labels_array[:,i+self.neurons]
incoming = len(np.where(labels_array[:,i+self.neurons]!=-1)[0])
uniques = np.unique(incoming_labels)
size = len(uniques)
if size > 2:
if i+self.neurons not in disputed:
disputed = np.append(disputed,i+self.neurons)
degree_array_out = degree_out[i+self.neurons,:]
for label in self.labels_list:
if degree_array_out[label] != 0:
incident_degree[i+self.neurons,label] = np.sum(degree_array_out[np.where(np.array(self.labels_list)!=label)])
else:
for label in self.labels_list:
if found[label].size > 0:
labeled_by[label] = np.setdiff1d(
np.unique(np.where(labels_array==label)[1]),disputed
)
weights[i+self.neurons,found[label]] = weights[found[label],i+self.neurons]
labels_array[i+self.neurons, found[label]] = label
for label in self.labels_list:
row, col = np.where(labels_array==label)
reinforce_exp = inner_degree[label]*np.exp(-self.gamma*incident_degree[col,label])
punish_exp = incident_degree[col,label]*self.beta
weights[row,col] = weights[row,col] + self.w_step*(1 - np.exp(-reinforce_exp)) \
- self.w_step*(1 - np.exp(-punish_exp))
weights[row,col] = np.where(weights[row,col]>self.w_max,self.w_max,weights[row,col])
rows_cuted, cols_cuted = np.where((weights!=0)&(weights<self.w_min))
disputed_cols, index, count = np.unique(cols_cuted,return_index=True, return_counts=True)
if disputed_cols.size > 0:
for j in range(disputed_cols.size):
disconnected = rows_cuted[np.where(disputed_cols[j]==cols_cuted)]
indexes = {}
weights_means = {}
indexes_size = np.zeros((len(self.labels_list)))
for label in self.labels_list:
inds = np.intersect1d(disconnected, np.where(labels_array[:,disputed_cols[j]]==label)[0])
indexes[label] = inds
indexes_size[label] = inds.size
if inds.size > 0:
disput = np.repeat(disputed_cols[j], inds.size)
weights_means[label] = np.mean(weights[inds,disput])
else:
weights_means[label] = 0
if disconnected.size == degree_out[disputed_cols[j],-1]:
label, max_weight = max(weights_means.items(), key=operator.itemgetter(1))
intersec = np.setdiff1d(disconnected, indexes[label])
weights[intersec,np.repeat(disputed_cols[j],intersec.size)] = 0
weights[indexes[label],np.repeat(disputed_cols[j],indexes[label].size)] = self.w_min
labels_array[intersec, np.repeat(disputed_cols[j],intersec.size)] = -1
degree_out[disputed_cols[j],np.where(np.array(self.labels_list).astype(int)!=label)] = 0
degree_out[disputed_cols[j],-1] = degree_out[disputed_cols[j],label]
incident_degree[disputed_cols[j],:] = 0
for key,value in indexes.items():
if key!=label and value.size>0:
inner_degree[key] -= value.size
labeled_by[label] = np.append(labeled_by[label],disputed_cols[j])
disputed = np.setdiff1d(disputed, disputed_cols[j])
elif disconnected.size < degree_out[disputed_cols[j],-1]:
for label in self.labels_list:
if indexes[label].size > 0:
weights[indexes[label],np.repeat(disputed_cols[j],indexes[label].size)] = 0
labels_array[indexes[label], np.repeat(disputed_cols[j], indexes[label].size)] = -1
inner_degree[label] -= indexes[label].size
incident =0
if indexes_size[label] < degree_out[disputed_cols[j],label]:
for l in self.labels_list:
if l != label:
incident+=indexes[l].size
elif indexes_size[label]==degree_out[disputed_cols[j],label]:
incident = incident_degree[disputed_cols[j],label]
incident_degree[disputed_cols[j],label]-=incident
degree_out[disputed_cols[j],label]-=indexes[label].size
degree_out[disputed_cols[j],-1]-=indexes[label].size
unique_labels = np.unique(labels_array[:,disputed_cols[j]])
if unique_labels.size==2:
label = int(unique_labels[1])
labeled_by[label] = np.append(labeled_by[label],disputed_cols[j])
disputed = np.setdiff1d(disputed,disputed_cols[j])
incident_degree[disputed_cols[j],:] = 0
for label in self.labels_list:
labels = labeled_by[label][np.where(labeled_by[label]>=self.neurons)[0]]
prediction[labels.astype(int)-self.neurons] = label
return prediction
| {"hexsha": "334156dd623a6dc612cac8393f7b6552f6441bcf", "size": 51522, "ext": "py", "lang": "Python", "max_stars_repo_path": "nsc/neurongraph.py", "max_stars_repo_name": "GuilhermeToso/masters-project", "max_stars_repo_head_hexsha": "01d5acfddaedb3cbf7fa9247a88108530547e155", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-08-01T20:13:15.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-01T20:13:15.000Z", "max_issues_repo_path": "nsc/neurongraph.py", "max_issues_repo_name": "GuilhermeToso/masters-project", "max_issues_repo_head_hexsha": "01d5acfddaedb3cbf7fa9247a88108530547e155", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "nsc/neurongraph.py", "max_forks_repo_name": "GuilhermeToso/masters-project", "max_forks_repo_head_hexsha": "01d5acfddaedb3cbf7fa9247a88108530547e155", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.7546296296, "max_line_length": 183, "alphanum_fraction": 0.5817514848, "include": true, "reason": "import numpy,from scipy", "num_tokens": 11354} |
// Copyright (c) 2006 Johan Rade
// Copyright (c) 2011 Paul A. Bristow To incorporate into Boost.Math
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)
// test_nonfinite_trap.cpp
#ifdef _MSC_VER
# pragma warning(disable : 4702)
#endif
#define BOOST_TEST_MAIN
#include <boost/test/auto_unit_test.hpp>
#include "almost_equal.ipp" // Similar to BOOST_CLOSE_FRACTION.
#include "s_.ipp" // To create test strings like std::basic_string<CharType> s = S_("0 -0");
#include <boost/math/special_functions/nonfinite_num_facets.hpp>
#include <locale>
#include <sstream>
namespace {
// Using an anonymous namespace resolves ambiguities on platforms
// with fpclassify etc functions at global scope.
using namespace boost::math;
using boost::math::signbit;
using boost::math::changesign;
using boost::math::isnan;
//------------------------------------------------------------------------------
void trap_test_finite();
void trap_test_inf();
void trap_test_nan();
BOOST_AUTO_TEST_CASE(trap_test)
{
trap_test_finite();
trap_test_inf();
trap_test_nan();
}
//------------------------------------------------------------------------------
template<class CharType, class ValType> void trap_test_finite_impl();
void trap_test_finite()
{
trap_test_finite_impl<char, float>();
trap_test_finite_impl<char, double>();
trap_test_finite_impl<char, long double>();
trap_test_finite_impl<wchar_t, float>();
trap_test_finite_impl<wchar_t, double>();
trap_test_finite_impl<wchar_t, long double>();
}
template<class CharType, class ValType> void trap_test_finite_impl()
{
std::locale old_locale;
std::locale tmp_locale(old_locale,
new nonfinite_num_put<CharType>(trap_infinity | trap_nan));
std::locale new_locale(tmp_locale,
new nonfinite_num_get<CharType>(trap_infinity | trap_nan));
std::basic_stringstream<CharType> ss;
ss.imbue(new_locale);
ValType a1 = (ValType)1.2;
ValType a2 = (ValType)-3.5;
ValType a3 = (std::numeric_limits<ValType>::max)();
ValType a4 = -(std::numeric_limits<ValType>::max)();
ss << a1 << ' ' << a2 << ' ' << a3 << ' ' << a4;
ValType b1, b2, b3, b4;
ss >> b1 >> b2 >> b3 >> b4;
BOOST_CHECK(almost_equal(b1, a1));
BOOST_CHECK(almost_equal(b2, a2));
BOOST_CHECK(almost_equal(b3, a3));
BOOST_CHECK(almost_equal(b4, a4));
BOOST_CHECK(b3 != std::numeric_limits<ValType>::infinity());
BOOST_CHECK(b4 != -std::numeric_limits<ValType>::infinity());
BOOST_CHECK(ss.rdstate() == std::ios_base::eofbit);
ss.clear();
ss.str(S_(""));
ss << "++5";
ValType b5;
ss >> b5;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit);
}
//------------------------------------------------------------------------------
template<class CharType, class ValType> void trap_test_inf_impl();
template<class CharType, class ValType> void trap_test_put_inf_impl();
template<class CharType, class ValType> void trap_test_get_inf_impl();
void trap_test_inf()
{
trap_test_inf_impl<char, float>();
trap_test_inf_impl<char, double>();
trap_test_inf_impl<char, long double>();
trap_test_inf_impl<wchar_t, float>();
trap_test_inf_impl<wchar_t, double>();
trap_test_inf_impl<wchar_t, long double>();
}
template<class CharType, class ValType> void trap_test_inf_impl()
{
trap_test_put_inf_impl<CharType, ValType>();
trap_test_get_inf_impl<CharType, ValType>();
}
template<class CharType, class ValType> void trap_test_put_inf_impl()
{
std::locale old_locale;
std::locale new_locale(old_locale,
new nonfinite_num_put<CharType>(trap_infinity));
std::basic_stringstream<CharType> ss;
ss.imbue(new_locale);
ValType a1 = std::numeric_limits<ValType>::infinity();
ss << a1;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit
|| ss.rdstate() == std::ios_base::badbit);
ss.clear();
ValType a2 = -std::numeric_limits<ValType>::infinity();
ss << a2;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit
|| ss.rdstate() == std::ios_base::badbit);
}
template<class CharType, class ValType> void trap_test_get_inf_impl()
{
std::locale old_locale;
std::locale tmp_locale(old_locale, new nonfinite_num_put<CharType>);
std::locale new_locale(tmp_locale,
new nonfinite_num_get<CharType>(trap_infinity));
std::basic_stringstream<CharType> ss;
ss.imbue(new_locale);
ValType a1 = std::numeric_limits<ValType>::infinity();
ss << a1;
ValType b1;
ss >> b1;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit);
ss.clear();
ss.str(S_(""));
ValType a2 = -std::numeric_limits<ValType>::infinity();
ss << a2;
ValType b2;
ss >> b2;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit);
}
//------------------------------------------------------------------------------
template<class CharType, class ValType> void trap_test_nan_impl();
template<class CharType, class ValType> void trap_test_put_nan_impl();
template<class CharType, class ValType> void trap_test_get_nan_impl();
void trap_test_nan()
{
trap_test_nan_impl<char, float>();
trap_test_nan_impl<char, double>();
trap_test_nan_impl<char, long double>();
trap_test_nan_impl<wchar_t, float>();
trap_test_nan_impl<wchar_t, double>();
trap_test_nan_impl<wchar_t, long double>();
}
template<class CharType, class ValType> void trap_test_nan_impl()
{
trap_test_put_nan_impl<CharType, ValType>();
trap_test_get_nan_impl<CharType, ValType>();
}
template<class CharType, class ValType> void trap_test_put_nan_impl()
{
std::locale old_locale;
std::locale new_locale(old_locale,
new nonfinite_num_put<CharType>(trap_nan));
std::basic_stringstream<CharType> ss;
ss.imbue(new_locale);
ValType a1 = std::numeric_limits<ValType>::quiet_NaN();
ss << a1;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit
|| ss.rdstate() == std::ios_base::badbit);
ss.clear();
ValType a2 = std::numeric_limits<ValType>::signaling_NaN();
ss << a2;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit
|| ss.rdstate() == std::ios_base::badbit);
}
template<class CharType, class ValType> void trap_test_get_nan_impl()
{
std::locale old_locale;
std::locale tmp_locale(old_locale, new nonfinite_num_put<CharType>);
std::locale new_locale(tmp_locale,
new nonfinite_num_get<CharType>(trap_nan));
std::basic_stringstream<CharType> ss;
ss.imbue(new_locale);
ValType a1 = std::numeric_limits<ValType>::quiet_NaN();
ss << a1;
ValType b1;
ss >> b1;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit);
ss.clear();
ss.str(S_(""));
ValType a2 = std::numeric_limits<ValType>::signaling_NaN();
ss << a2;
ValType b2;
ss >> b2;
BOOST_CHECK(ss.rdstate() == std::ios_base::failbit);
}
//------------------------------------------------------------------------------
} // anonymous namespace
| {"hexsha": "819833e1a021efe0113134397e8b1edf98b968b0", "size": 7084, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "boost/libs/math/test/test_nonfinite_trap.cpp", "max_stars_repo_name": "randolphwong/mcsema", "max_stars_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 12278.0, "max_stars_repo_stars_event_min_datetime": "2015-01-29T17:11:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T21:12:00.000Z", "max_issues_repo_path": "boost/libs/math/test/test_nonfinite_trap.cpp", "max_issues_repo_name": "randolphwong/mcsema", "max_issues_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 9469.0, "max_issues_repo_issues_event_min_datetime": "2015-01-30T05:33:07.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:17:21.000Z", "max_forks_repo_path": "boost/libs/math/test/test_nonfinite_trap.cpp", "max_forks_repo_name": "randolphwong/mcsema", "max_forks_repo_head_hexsha": "eb5b376736e7f57ff0a61f7e4e5a436bbb874720", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 892.0, "max_forks_repo_forks_event_min_datetime": "2015-01-29T16:26:19.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-20T07:44:30.000Z", "avg_line_length": 29.5166666667, "max_line_length": 93, "alphanum_fraction": 0.6511857708, "num_tokens": 1808} |
from flare.rbcm import RobustBayesianCommitteeMachine
from flare.gp import GaussianProcess
import os
import numpy as np
from flare.struc import Structure
from flare.env import AtomicEnvironment
from flare.gp_algebra import get_kernel_vector
TEST_DIR = os.path.dirname(__file__)
TEST_FILE_DIR = os.path.join(TEST_DIR, "test_files")
methanol_frames = Structure.from_file(
os.path.join(TEST_FILE_DIR, "methanol_frames.json")
)
methanol_envs = AtomicEnvironment.from_file(
os.path.join(TEST_FILE_DIR, "methanol_envs.json")
)
def test_basic():
rbcm = RobustBayesianCommitteeMachine()
assert isinstance(rbcm, RobustBayesianCommitteeMachine)
def test_expert_growth_and_training():
"""
    Test that, as data is added, it is allocated to experts correctly
    and that the various training algorithms can run correctly on it
:return:
"""
rbcm = RobustBayesianCommitteeMachine(ndata_per_expert=10)
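    # 20 environments at 10 data points per expert should be split across exactly two experts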
for env in methanol_envs[:20]:
rbcm.add_one_env(env, env.force)
assert rbcm.n_experts == 2
for opt_algorithm in ["differential evolution"]:
rbcm.opt_algorithm = opt_algorithm
rbcm.hyps = np.array([2, 2, 2, 2, 2])
rbcm.maxiter = 2
rbcm.train(line_steps=5)
assert not np.array_equal([2, 2, 2, 2, 2], rbcm.hyps)
def test_prediction():
"""
    Test that the prediction function works.
The RBCM in the 1-expert case *does not* reduce to a GP's predictions,
    because the mean and variance computed for each expert are weighted
    based on the expert's performance on the entire dataset in a way
    that does not yield 1 in the absence of other experts.
Hence, perform the relevant transformations on a GP's prediction
and check it against the RBCM's.
:return:
"""
prior_var = 0.1
rbcm = RobustBayesianCommitteeMachine(
ndata_per_expert=100,
prior_variance=prior_var,
)
gp = GaussianProcess()
envs = methanol_envs[:10]
for env in envs:
rbcm.add_one_env(env, env.force)
gp.add_one_env(env, env.force, train=False)
struc = methanol_frames[-1]
gp.update_db(struc, forces=struc.forces)
rbcm.update_db(struc, forces=struc.forces)
test_env = methanol_envs[-1]
for d in [1, 2, 3]:
assert np.array_equal(gp.hyps, rbcm.hyps)
rbcm_pred = rbcm.predict(test_env, d)
gp_pred = gp.predict(test_env, d)
gp_kv = get_kernel_vector(
gp.name,
gp.kernel,
gp.energy_force_kernel,
test_env,
d,
gp.hyps,
cutoffs=gp.cutoffs,
hyps_mask=gp.hyps_mask,
n_cpus=1,
n_sample=gp.n_sample,
)
gp_mean = np.matmul(gp_kv, gp.alpha)
assert gp_mean == gp_pred[0]
gp_self_kern = gp.kernel(
env1=test_env,
env2=test_env,
d1=d,
d2=d,
hyps=gp.hyps,
cutoffs=np.array((7, 3.5)),
)
gp_var_i = gp_self_kern - np.matmul(np.matmul(gp_kv.T, gp.ky_mat_inv), gp_kv)
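        # rBCM weighting: beta is half the log-ratio of prior to posterior variance; the expert's
        # precision-weighted mean is scaled by beta and the prior is corrected with weight (1 - beta)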
gp_beta = 0.5 * (np.log(prior_var) - np.log(gp_var_i))
mean = gp_mean * gp_beta / gp_var_i
var = gp_beta / gp_var_i + (1 - gp_beta) / prior_var
pred_var = 1.0 / var
pred_mean = pred_var * mean
assert pred_mean == rbcm_pred[0]
assert pred_var == rbcm_pred[1]
def test_to_from_gp():
"""
To/from methods for creating new RBCMs
and turning them back into GPs
:return:
"""
gp = GaussianProcess()
for frame in methanol_frames:
gp.update_db(frame, forces=frame.forces)
rbcm = RobustBayesianCommitteeMachine.from_gp(gp)
new_gp = rbcm.get_full_gp()
test_env = methanol_envs[0]
for d in range(1, 4):
assert np.array_equal(gp.predict(test_env, d), new_gp.predict(test_env, d))
def test_io():
"""
Read / write methods
:return:
"""
rbcm = RobustBayesianCommitteeMachine(ndata_per_expert=3)
rbcm.update_db(methanol_frames[0], forces=methanol_frames[0].forces)
rbcm.write_model("test_model.pickle")
new_model = RobustBayesianCommitteeMachine.from_file("test_model.pickle")
test_env = methanol_envs[0]
assert np.array_equal(
rbcm.predict_force_xyz(test_env), new_model.predict_force_xyz(test_env)
)
def test_convenience_methods():
# TODO beef up these tests
rbcm = RobustBayesianCommitteeMachine(ndata_per_expert=3)
rbcm.update_db(methanol_frames[0], forces=methanol_frames[0].forces)
training_stats = rbcm.training_statistics
assert isinstance(training_stats, dict)
assert isinstance(str(rbcm), str)
| {"hexsha": "027648a8f6daed9fed13a52884d10fa00147d177", "size": 4696, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_rbcm.py", "max_stars_repo_name": "aaronchen0316/flare", "max_stars_repo_head_hexsha": "47a2a89af635dfec6b41a873625ac2411da14ebb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_rbcm.py", "max_issues_repo_name": "aaronchen0316/flare", "max_issues_repo_head_hexsha": "47a2a89af635dfec6b41a873625ac2411da14ebb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_rbcm.py", "max_forks_repo_name": "aaronchen0316/flare", "max_forks_repo_head_hexsha": "47a2a89af635dfec6b41a873625ac2411da14ebb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.9523809524, "max_line_length": 85, "alphanum_fraction": 0.6624787053, "include": true, "reason": "import numpy", "num_tokens": 1235} |
[STATEMENT]
lemma free_eta: "s \<rightarrow>\<^sub>\<eta> t \<Longrightarrow> loose_bvar1 t i = loose_bvar1 s i"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. s \<rightarrow>\<^sub>\<eta> t \<Longrightarrow> loose_bvar1 t i = loose_bvar1 s i
[PROOF STEP]
apply (induct arbitrary: i set: eta)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<And>s T i. \<not> is_dependent s \<Longrightarrow> loose_bvar1 (decr 0 s) i = loose_bvar1 (Abs T (s $ Bv 0)) i
2. \<And>s t u i. \<lbrakk>s \<rightarrow>\<^sub>\<eta> t; \<And>i. loose_bvar1 t i = loose_bvar1 s i\<rbrakk> \<Longrightarrow> loose_bvar1 (t $ u) i = loose_bvar1 (s $ u) i
3. \<And>s t u i. \<lbrakk>s \<rightarrow>\<^sub>\<eta> t; \<And>i. loose_bvar1 t i = loose_bvar1 s i\<rbrakk> \<Longrightarrow> loose_bvar1 (u $ t) i = loose_bvar1 (u $ s) i
4. \<And>s t T i. \<lbrakk>s \<rightarrow>\<^sub>\<eta> t; \<And>i. loose_bvar1 t i = loose_bvar1 s i\<rbrakk> \<Longrightarrow> loose_bvar1 (Abs T t) i = loose_bvar1 (Abs T s) i
[PROOF STEP]
apply (simp_all cong: conj_cong)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<And>s i. \<not> is_dependent s \<Longrightarrow> loose_bvar1 (decr 0 s) i = loose_bvar1 s (Suc i)
[PROOF STEP]
using is_dependent_def loose_bvar1_decr''' loose_bvar1_decr''''
[PROOF STATE]
proof (prove)
using this:
is_dependent ?t \<equiv> loose_bvar1 ?t 0
\<lbrakk>loose_bvar1 ?t (Suc ?lev); ?lev' \<le> ?lev\<rbrakk> \<Longrightarrow> loose_bvar1 (decr ?lev' ?t) ?lev
\<lbrakk>\<not> loose_bvar1 ?t ?lev'; ?lev' \<le> ?lev; \<not> loose_bvar1 ?t (Suc ?lev)\<rbrakk> \<Longrightarrow> \<not> loose_bvar1 (decr ?lev' ?t) ?lev
goal (1 subgoal):
1. \<And>s i. \<not> is_dependent s \<Longrightarrow> loose_bvar1 (decr 0 s) i = loose_bvar1 s (Suc i)
[PROOF STEP]
by blast | {"llama_tokens": 763, "file": "Metalogic_ProofChecker_EtaNorm", "length": 4} |
import numpy as np
import plotly.offline as pyo
import plotly.graph_objs as go
np.random.seed(42)
random_x = np.random.randint(1, 101, 100)
random_y = np.random.randint(1, 101, 100)
print(type(random_x))
data = [go.Scatter(x=random_x,
y=random_y,
mode='markers',
marker=dict(
size=12,
color='rgb(51,204,153)',
symbol='pentagon',
line={'width': 2}
))]
layout = go.Layout(title='Hello First Plot',
xaxis={'title': 'My X AXIS'},
yaxis=dict(title='MY Y AXIS'),
hovermode='closest')
fig = go.Figure(data=data, layout=layout)
pyo.plot(fig, filename='scatter.html')
| {"hexsha": "643c61787cc691269655088935c5f107a6fb6e38", "size": 791, "ext": "py", "lang": "Python", "max_stars_repo_path": "Plotly/scatter_plots.py", "max_stars_repo_name": "jorge-garciadiego/Visualization-Dash", "max_stars_repo_head_hexsha": "68fe99ff5c3ecc99eea3845f38849669449d89d2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Plotly/scatter_plots.py", "max_issues_repo_name": "jorge-garciadiego/Visualization-Dash", "max_issues_repo_head_hexsha": "68fe99ff5c3ecc99eea3845f38849669449d89d2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Plotly/scatter_plots.py", "max_forks_repo_name": "jorge-garciadiego/Visualization-Dash", "max_forks_repo_head_hexsha": "68fe99ff5c3ecc99eea3845f38849669449d89d2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 26.3666666667, "max_line_length": 49, "alphanum_fraction": 0.5069532238, "include": true, "reason": "import numpy", "num_tokens": 179} |
import pandas as pd
import numpy as np
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(prog='augment_snp_effects.py',
description='''
Augment SNP effects generated by simulate_effect_sizes.py
''')
parser.add_argument('--snp_effect_parquet', help='''
Parquet file for SNP effects
''')
parser.add_argument('--output', help='''
Output parquet name
''')
parser.add_argument('--rand_seed', type=int, help='''
Rand seed
''')
parser.add_argument('--augment_size', type=int, help='''
Number of traits to augment
''')
args = parser.parse_args()
import logging, time, sys, os
    # configure the logging utility
logging.basicConfig(
level = logging.INFO,
stream = sys.stderr,
format = '%(asctime)s %(message)s',
datefmt = '%Y-%m-%d %I:%M:%S %p')
logging.info('Random seed = {}'.format(args.rand_seed))
np.random.seed(args.rand_seed)
logging.info('Loading parquet')
df = pd.read_parquet(args.snp_effect_parquet)
logging.info('Size = {} x {}'.format(df.shape[0], df.shape[1]))
logging.info('Augmenting: n = {}'.format(args.augment_size))
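    # draw independent standard-normal effect sizes, one column per additional trait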
new_weights = np.random.normal(size=(df.shape[0], args.augment_size))
names = [ f'Additional_{i}' for i in range(args.augment_size) ]
df_new = pd.DataFrame(new_weights, columns=names)
df = pd.concat([df, df_new], axis=1)
logging.info('New size = {} x {}'.format(df.shape[0], df.shape[1]))
logging.info('Saving')
df.to_parquet(args.output)
| {"hexsha": "f45b4903f5f312d399c0232ed7cbf8c5427fcb74", "size": 1607, "ext": "py", "lang": "Python", "max_stars_repo_path": "submission/simulation/simulate_phenotypes/augment_snp_effects.py", "max_stars_repo_name": "liangyy/ukb_idp_genetic_arch", "max_stars_repo_head_hexsha": "562612af6c82684f13e0cf2cbe2adc9a8f31b29d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "submission/simulation/simulate_phenotypes/augment_snp_effects.py", "max_issues_repo_name": "liangyy/ukb_idp_genetic_arch", "max_issues_repo_head_hexsha": "562612af6c82684f13e0cf2cbe2adc9a8f31b29d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "submission/simulation/simulate_phenotypes/augment_snp_effects.py", "max_forks_repo_name": "liangyy/ukb_idp_genetic_arch", "max_forks_repo_head_hexsha": "562612af6c82684f13e0cf2cbe2adc9a8f31b29d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.7959183673, "max_line_length": 73, "alphanum_fraction": 0.6148102054, "include": true, "reason": "import numpy", "num_tokens": 409} |
import numpy as np
class GameInfo(object):
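    """Snapshot of a single game frame: the rendered image, both players' actions, and event flags."""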
def __init__(self,
frame,
robot_action=None,
human_action=None,
scored=None,
puck_was_hit=False,
puck_is_at_the_bottom=False,
distance_decreased=False,
hit_the_border=False,
in_the_target=False):
self.frame = np.copy(frame)
if robot_action is None:
self.robot_action = np.zeros(2, dtype=np.float32)
else:
self.robot_action = np.copy(robot_action)
if human_action is None:
self.human_action = np.zeros(2, dtype=np.float32)
else:
self.human_action = np.copy(human_action)
self.puck_was_hit = puck_was_hit
self.scored = scored
self.puck_is_at_the_bottom = puck_is_at_the_bottom
self.distance_decreased = distance_decreased
self.hit_the_border = hit_the_border
self.in_the_target = in_the_target | {"hexsha": "cedb0a483680e4dcbaf67c37bf2ab5f242ab6d31", "size": 1072, "ext": "py", "lang": "Python", "max_stars_repo_path": "air_hockey/game_info.py", "max_stars_repo_name": "Gabo-Tor/air-hockey", "max_stars_repo_head_hexsha": "fc949075812da9c5fb5bbb511a717c4aa51640b3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2019-02-03T17:16:03.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-18T18:04:52.000Z", "max_issues_repo_path": "air_hockey/game_info.py", "max_issues_repo_name": "Gabo-Tor/air-hockey", "max_issues_repo_head_hexsha": "fc949075812da9c5fb5bbb511a717c4aa51640b3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "air_hockey/game_info.py", "max_forks_repo_name": "Gabo-Tor/air-hockey", "max_forks_repo_head_hexsha": "fc949075812da9c5fb5bbb511a717c4aa51640b3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2020-02-25T03:50:04.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-16T15:09:29.000Z", "avg_line_length": 33.5, "max_line_length": 61, "alphanum_fraction": 0.5625, "include": true, "reason": "import numpy", "num_tokens": 216} |
//
// Copyright (c) 2002--2010
// Toon Knapen, Karl Meerbergen, Kresimir Fresl,
// Thomas Klimpel and Rutger ter Borg
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// THIS FILE IS AUTOMATICALLY GENERATED
// PLEASE DO NOT EDIT!
//
#ifndef BOOST_NUMERIC_BINDINGS_LAPACK_DRIVER_HPP
#define BOOST_NUMERIC_BINDINGS_LAPACK_DRIVER_HPP
#include <boost/numeric/bindings/lapack/driver/gbsv.hpp>
#include <boost/numeric/bindings/lapack/driver/gbsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/gees.hpp>
#include <boost/numeric/bindings/lapack/driver/geesx.hpp>
#include <boost/numeric/bindings/lapack/driver/geev.hpp>
#include <boost/numeric/bindings/lapack/driver/geevx.hpp>
#include <boost/numeric/bindings/lapack/driver/gegv.hpp>
#include <boost/numeric/bindings/lapack/driver/gejsv.hpp>
#include <boost/numeric/bindings/lapack/driver/gels.hpp>
#include <boost/numeric/bindings/lapack/driver/gelsd.hpp>
#include <boost/numeric/bindings/lapack/driver/gelss.hpp>
#include <boost/numeric/bindings/lapack/driver/gelsy.hpp>
#include <boost/numeric/bindings/lapack/driver/gesdd.hpp>
#include <boost/numeric/bindings/lapack/driver/gesv.hpp>
#include <boost/numeric/bindings/lapack/driver/gesvd.hpp>
#include <boost/numeric/bindings/lapack/driver/gesvx.hpp>
#include <boost/numeric/bindings/lapack/driver/gges.hpp>
#include <boost/numeric/bindings/lapack/driver/ggesx.hpp>
#include <boost/numeric/bindings/lapack/driver/ggev.hpp>
#include <boost/numeric/bindings/lapack/driver/ggevx.hpp>
#include <boost/numeric/bindings/lapack/driver/ggglm.hpp>
#include <boost/numeric/bindings/lapack/driver/gglse.hpp>
#include <boost/numeric/bindings/lapack/driver/ggsvd.hpp>
#include <boost/numeric/bindings/lapack/driver/gtsv.hpp>
#include <boost/numeric/bindings/lapack/driver/gtsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/hbev.hpp>
#include <boost/numeric/bindings/lapack/driver/hbevd.hpp>
#include <boost/numeric/bindings/lapack/driver/hbevx.hpp>
#include <boost/numeric/bindings/lapack/driver/hbgv.hpp>
#include <boost/numeric/bindings/lapack/driver/hbgvd.hpp>
#include <boost/numeric/bindings/lapack/driver/hbgvx.hpp>
#include <boost/numeric/bindings/lapack/driver/heev.hpp>
#include <boost/numeric/bindings/lapack/driver/heevd.hpp>
#include <boost/numeric/bindings/lapack/driver/heevr.hpp>
#include <boost/numeric/bindings/lapack/driver/heevx.hpp>
#include <boost/numeric/bindings/lapack/driver/hegv.hpp>
#include <boost/numeric/bindings/lapack/driver/hegvd.hpp>
#include <boost/numeric/bindings/lapack/driver/hegvx.hpp>
#include <boost/numeric/bindings/lapack/driver/hesv.hpp>
#include <boost/numeric/bindings/lapack/driver/hesvx.hpp>
#include <boost/numeric/bindings/lapack/driver/hpev.hpp>
#include <boost/numeric/bindings/lapack/driver/hpevd.hpp>
#include <boost/numeric/bindings/lapack/driver/hpevx.hpp>
#include <boost/numeric/bindings/lapack/driver/hpgv.hpp>
#include <boost/numeric/bindings/lapack/driver/hpgvd.hpp>
#include <boost/numeric/bindings/lapack/driver/hpgvx.hpp>
#include <boost/numeric/bindings/lapack/driver/hpsv.hpp>
#include <boost/numeric/bindings/lapack/driver/hpsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/iter_gesv.hpp>
#include <boost/numeric/bindings/lapack/driver/iter_posv.hpp>
#include <boost/numeric/bindings/lapack/driver/pbsv.hpp>
#include <boost/numeric/bindings/lapack/driver/pbsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/posv.hpp>
#include <boost/numeric/bindings/lapack/driver/posvx.hpp>
#include <boost/numeric/bindings/lapack/driver/ppsv.hpp>
#include <boost/numeric/bindings/lapack/driver/ppsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/ptsv.hpp>
#include <boost/numeric/bindings/lapack/driver/ptsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/sbev.hpp>
#include <boost/numeric/bindings/lapack/driver/sbevd.hpp>
#include <boost/numeric/bindings/lapack/driver/sbevx.hpp>
#include <boost/numeric/bindings/lapack/driver/sbgv.hpp>
#include <boost/numeric/bindings/lapack/driver/sbgvd.hpp>
#include <boost/numeric/bindings/lapack/driver/sbgvx.hpp>
#include <boost/numeric/bindings/lapack/driver/spev.hpp>
#include <boost/numeric/bindings/lapack/driver/spevd.hpp>
#include <boost/numeric/bindings/lapack/driver/spevx.hpp>
#include <boost/numeric/bindings/lapack/driver/spgv.hpp>
#include <boost/numeric/bindings/lapack/driver/spgvd.hpp>
#include <boost/numeric/bindings/lapack/driver/spgvx.hpp>
#include <boost/numeric/bindings/lapack/driver/spsv.hpp>
#include <boost/numeric/bindings/lapack/driver/spsvx.hpp>
#include <boost/numeric/bindings/lapack/driver/stev.hpp>
#include <boost/numeric/bindings/lapack/driver/stevd.hpp>
#include <boost/numeric/bindings/lapack/driver/stevr.hpp>
#include <boost/numeric/bindings/lapack/driver/stevx.hpp>
#include <boost/numeric/bindings/lapack/driver/syev.hpp>
#include <boost/numeric/bindings/lapack/driver/syevd.hpp>
#include <boost/numeric/bindings/lapack/driver/syevr.hpp>
#include <boost/numeric/bindings/lapack/driver/syevx.hpp>
#include <boost/numeric/bindings/lapack/driver/sygv.hpp>
#include <boost/numeric/bindings/lapack/driver/sygvd.hpp>
#include <boost/numeric/bindings/lapack/driver/sygvx.hpp>
#include <boost/numeric/bindings/lapack/driver/sysv.hpp>
#include <boost/numeric/bindings/lapack/driver/sysvx.hpp>
#endif
| {"hexsha": "ef08398e9274664a6b60791de17e8d1bf0b31f52", "size": 5362, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "externals/numeric_bindings/boost/numeric/bindings/lapack/driver.hpp", "max_stars_repo_name": "ljktest/siconos", "max_stars_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 137.0, "max_stars_repo_stars_event_min_datetime": "2015-06-16T15:55:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T06:01:59.000Z", "max_issues_repo_path": "externals/numeric_bindings/boost/numeric/bindings/lapack/driver.hpp", "max_issues_repo_name": "ljktest/siconos", "max_issues_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 381.0, "max_issues_repo_issues_event_min_datetime": "2015-09-22T15:31:08.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T09:05:23.000Z", "max_forks_repo_path": "externals/numeric_bindings/boost/numeric/bindings/lapack/driver.hpp", "max_forks_repo_name": "ljktest/siconos", "max_forks_repo_head_hexsha": "85b60e62beca46e6bf06bfbd65670089e86607c7", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 30.0, "max_forks_repo_forks_event_min_datetime": "2015-08-06T22:57:51.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-02T20:30:20.000Z", "avg_line_length": 51.5576923077, "max_line_length": 61, "alphanum_fraction": 0.8054830287, "num_tokens": 1568} |
import numpy as np
from .myqt import QT
import pyqtgraph as pg
from .cataloguecontroller import CatalogueController
from .traceviewer import CatalogueTraceViewer
from .peaklists import PeakList, ClusterPeakList
from .ndscatter import NDScatter
from .waveformviewer import WaveformViewer
from .similarity import SpikeSimilarityView, ClusterSimilarityView, ClusterRatioSimilarityView
from .pairlist import PairList
from .silhouette import Silhouette
from .waveformhistviewer import WaveformHistViewer
from .featuretimeviewer import FeatureTimeViewer
from .tools import ParamDialog, open_dialog_methods
from . import gui_params
from . import icons
import itertools
import datetime
import time
import webbrowser
class CatalogueWindow(QT.QMainWindow):
new_catalogue = QT.pyqtSignal(int)
def __init__(self, catalogueconstructor):
QT.QMainWindow.__init__(self)
self.setWindowIcon(QT.QIcon(':/main_icon.png'))
self.catalogueconstructor = catalogueconstructor
self.controller = CatalogueController(catalogueconstructor=catalogueconstructor)
self.traceviewer = CatalogueTraceViewer(controller=self.controller)
self.peaklist = PeakList(controller=self.controller)
self.clusterlist = ClusterPeakList(controller=self.controller)
self.ndscatter = NDScatter(controller=self.controller)
self.waveformviewer = WaveformViewer(controller=self.controller)
self.spikesimilarityview = SpikeSimilarityView(controller=self.controller)
self.clustersimilarityview = ClusterSimilarityView(controller=self.controller)
self.clusterratiosimilarityview = ClusterRatioSimilarityView(controller=self.controller)
self.pairlist = PairList(controller=self.controller)
self.silhouette = Silhouette(controller=self.controller)
self.waveformhistviewer = WaveformHistViewer(controller=self.controller)
self.featuretimeviewer = FeatureTimeViewer(controller=self.controller)
docks = {}
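        # dock widgets: waveform/trace/feature views stacked on the right, peak/cluster lists and similarity/metric views on the left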
docks['waveformviewer'] = QT.QDockWidget('waveformviewer',self)
docks['waveformviewer'].setWidget(self.waveformviewer)
#self.tabifyDockWidget(docks['ndscatter'], docks['waveformviewer'])
self.addDockWidget(QT.Qt.RightDockWidgetArea, docks['waveformviewer'])
docks['waveformhistviewer'] = QT.QDockWidget('waveformhistviewer',self)
docks['waveformhistviewer'].setWidget(self.waveformhistviewer)
self.tabifyDockWidget(docks['waveformviewer'], docks['waveformhistviewer'])
docks['featuretimeviewer'] = QT.QDockWidget('featuretimeviewer',self)
docks['featuretimeviewer'].setWidget(self.featuretimeviewer)
self.tabifyDockWidget(docks['waveformhistviewer'], docks['featuretimeviewer'])
docks['traceviewer'] = QT.QDockWidget('traceviewer',self)
docks['traceviewer'].setWidget(self.traceviewer)
#self.addDockWidget(QT.Qt.RightDockWidgetArea, docks['traceviewer'])
self.tabifyDockWidget(docks['waveformviewer'], docks['traceviewer'])
docks['peaklist'] = QT.QDockWidget('peaklist',self)
docks['peaklist'].setWidget(self.peaklist)
self.addDockWidget(QT.Qt.LeftDockWidgetArea, docks['peaklist'])
docks['pairlist'] = QT.QDockWidget('pairlist',self)
docks['pairlist'].setWidget(self.pairlist)
self.splitDockWidget(docks['peaklist'], docks['pairlist'], QT.Qt.Horizontal)
docks['clusterlist'] = QT.QDockWidget('clusterlist',self)
docks['clusterlist'].setWidget(self.clusterlist)
self.tabifyDockWidget(docks['pairlist'], docks['clusterlist'])
#on bottom left
docks['spikesimilarityview'] = QT.QDockWidget('spikesimilarityview',self)
docks['spikesimilarityview'].setWidget(self.spikesimilarityview)
self.addDockWidget(QT.Qt.LeftDockWidgetArea, docks['spikesimilarityview'])
docks['clustersimilarityview'] = QT.QDockWidget('clustersimilarityview',self)
docks['clustersimilarityview'].setWidget(self.clustersimilarityview)
self.tabifyDockWidget(docks['spikesimilarityview'], docks['clustersimilarityview'])
docks['clusterratiosimilarityview'] = QT.QDockWidget('clusterratiosimilarityview',self)
docks['clusterratiosimilarityview'].setWidget(self.clusterratiosimilarityview)
self.tabifyDockWidget(docks['spikesimilarityview'], docks['clusterratiosimilarityview'])
docks['silhouette'] = QT.QDockWidget('silhouette',self)
docks['silhouette'].setWidget(self.silhouette)
self.tabifyDockWidget(docks['spikesimilarityview'], docks['silhouette'])
docks['ndscatter'] = QT.QDockWidget('ndscatter',self)
docks['ndscatter'].setWidget(self.ndscatter)
self.tabifyDockWidget(docks['spikesimilarityview'], docks['ndscatter'])
self.create_actions()
self.create_toolbar()
def create_actions(self):
self.act_make_catalogue = QT.QAction('Make catalogue for peeler', self,checkable = False, icon=QT.QIcon(":/document-save.svg"))
self.act_make_catalogue.triggered.connect(self.make_catalogue_for_peeler)
self.act_savepoint = QT.QAction('Savepoint', self,checkable = False, icon=QT.QIcon(":/document-save.svg"))
self.act_savepoint.triggered.connect(self.create_savepoint)
#~ self.act_refresh = QT.QAction('Refresh', self,checkable = False, icon=QT.QIcon.fromTheme("view-refresh"))
self.act_refresh = QT.QAction('Refresh', self,checkable = False, icon=QT.QIcon(":/view-refresh.svg"))
self.act_refresh.triggered.connect(self.refresh_with_reload)
self.act_redetect_peak = QT.QAction('New peaks', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_redetect_peak.triggered.connect(self.redetect_peak)
self.act_new_waveforms = QT.QAction('New waveforms', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_new_waveforms.triggered.connect(self.new_waveforms)
self.act_clean_waveforms = QT.QAction('Clean waveforms', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_clean_waveforms.triggered.connect(self.clean_waveforms)
self.act_new_noise_snippet = QT.QAction('New noise snippet', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_new_noise_snippet.triggered.connect(self.new_noise_snippet)
self.act_new_features = QT.QAction('New features', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_new_features.triggered.connect(self.new_features)
self.act_new_cluster = QT.QAction('New cluster', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_new_cluster.triggered.connect(self.new_cluster)
self.act_compute_metrics = QT.QAction('Compute metrics', self,checkable = False, icon=QT.QIcon(":/configure-shortcuts.svg"))
self.act_compute_metrics.triggered.connect(self.compute_metrics)
self.help_act = QT.QAction('Help', self,checkable = False, icon=QT.QIcon(":main_icon.png"))
self.help_act.triggered.connect(self.open_webbrowser_help)
def create_toolbar(self):
self.toolbar = QT.QToolBar('Tools')
self.toolbar.setToolButtonStyle(QT.Qt.ToolButtonTextUnderIcon)
self.addToolBar(QT.Qt.RightToolBarArea, self.toolbar)
self.toolbar.setIconSize(QT.QSize(60, 40))
self.toolbar.addAction(self.act_make_catalogue)
self.toolbar.addSeparator()
self.toolbar.addAction(self.act_refresh)
self.toolbar.addSeparator()
self.toolbar.addAction(self.act_redetect_peak)
self.toolbar.addAction(self.act_new_waveforms)
self.toolbar.addAction(self.act_clean_waveforms)
self.toolbar.addAction(self.act_new_noise_snippet)
self.toolbar.addAction(self.act_new_features)
self.toolbar.addAction(self.act_new_cluster)
self.toolbar.addAction(self.act_compute_metrics)
self.toolbar.addSeparator()
self.toolbar.addAction(self.help_act)
self.toolbar.addSeparator()
self.toolbar.addAction(self.act_savepoint)
def warn(self, title, text):
mb = QT.QMessageBox.warning(self, title,text, QT.QMessageBox.Ok , QT.QMessageBox.NoButton)
def open_webbrowser_help(self):
url = "http://tridesclous.readthedocs.io/en/latest/catalogue_window.html"
webbrowser.open(url, new=2)
def make_catalogue_for_peeler(self):
self.catalogueconstructor.make_catalogue_for_peeler()
self.new_catalogue.emit(self.catalogueconstructor.chan_grp)
def create_savepoint(self):
try:
copy_path = self.catalogueconstructor.create_savepoint()
except:
copy_path = None
if copy_path is None:
txt = 'Savepoint FAIL!!!'
else:
txt = 'Savepoint done here {}'.format(copy_path)
self.warn('savepoint', txt)
def refresh_with_reload(self):
self.controller.reload_data()
self.refresh()
def refresh(self):
self.controller.check_plot_attributes()
for w in self.controller.views:
            #TODO: refresh only the visible views, but that needs a hook on visibility changes
#~ print(w)
#~ t1 = time.perf_counter()
w.refresh()
#~ t2 = time.perf_counter()
#~ print('refresh',w, t2-t1)
def redetect_peak(self):
dia = ParamDialog(gui_params.peak_detector_params)
dia.resize(450, 500)
if dia.exec_():
d = dia.get()
self.catalogueconstructor.re_detect_peak(**d)
self.controller.init_plot_attributes()
self.refresh()
def new_waveforms(self):
dia = ParamDialog(gui_params.waveforms_params)
dia.resize(450, 500)
if dia.exec_():
d = dia.get()
self.catalogueconstructor.extract_some_waveforms(**d)
self.refresh()
def clean_waveforms(self):
dia = ParamDialog(gui_params.clean_waveforms_params)
dia.resize(450, 500)
if dia.exec_():
d = dia.get()
self.catalogueconstructor.clean_waveforms(**d)
self.refresh()
def new_noise_snippet(self):
dia = ParamDialog(gui_params.noise_snippet_params)
dia.resize(450, 500)
if dia.exec_():
d = dia.get()
self.catalogueconstructor.extract_some_noise(**d)
self.refresh()
def new_features(self):
method, kargs = open_dialog_methods(gui_params.features_params_by_methods, self)
if method is not None:
self.catalogueconstructor.extract_some_features(method=method, **kargs)
self.refresh()
def new_cluster(self):
method, kargs = open_dialog_methods(gui_params.cluster_params_by_methods, self)
if method is not None:
self.catalogueconstructor.find_clusters(method=method, **kargs)
self.refresh()
def compute_metrics(self):
dia = ParamDialog(gui_params.metrics_params)
dia.resize(450, 500)
if dia.exec_():
d = dia.get()
self.catalogueconstructor.compute_spike_waveforms_similarity(method=d['spike_waveforms_similarity'], size_max=d['size_max'])
self.catalogueconstructor.compute_cluster_similarity(method=d['cluster_similarity'])
self.catalogueconstructor.compute_cluster_ratio_similarity(method=d['cluster_ratio_similarity'])
self.catalogueconstructor.compute_spike_silhouette(size_max=d['size_max'])
#TODO refresh only metrics concerned
self.refresh()
| {"hexsha": "b9b1ea8c986a30498e0764c11494f11d061c21e9", "size": 11968, "ext": "py", "lang": "Python", "max_stars_repo_path": "tridesclous/gui/cataloguewindow.py", "max_stars_repo_name": "rdarie/tridesclous", "max_stars_repo_head_hexsha": "178c0a67d7b3ac88be8e4383001396c1e0f976c2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tridesclous/gui/cataloguewindow.py", "max_issues_repo_name": "rdarie/tridesclous", "max_issues_repo_head_hexsha": "178c0a67d7b3ac88be8e4383001396c1e0f976c2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tridesclous/gui/cataloguewindow.py", "max_forks_repo_name": "rdarie/tridesclous", "max_forks_repo_head_hexsha": "178c0a67d7b3ac88be8e4383001396c1e0f976c2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.2057761733, "max_line_length": 136, "alphanum_fraction": 0.6896724599, "include": true, "reason": "import numpy", "num_tokens": 2597} |
#include <fstream>
#include <set>
#include <sstream>
#include <string>
#include <boost/test/unit_test.hpp>
#include "utilities.h"
#include "relation_buffer.h"
using namespace cflr;
using namespace std;
BOOST_AUTO_TEST_SUITE( relation_buffer_test )
BOOST_AUTO_TEST_CASE( int_buffer ){
typedef relation_buffer<int, int> i2;
registrar<int> reg;
i2 buf(i2::reg_type{®, ®});
buf.add(i2::outer_type{5, 6});
buf.add(i2::outer_type{6, 6});
buf.add(i2::outer_type{6, 5});
buf.add(i2::outer_type{5, 5});
BOOST_CHECK(buf.size() == 4);
auto idx2 = buf.retrieve(2);
BOOST_CHECK(get<0>(idx2) == 6 && get<1>(idx2) == 5);
BOOST_CHECK(reg.get(buf[2][0]) == 6);
BOOST_CHECK(reg.get(buf[2][1]) == 5);
typedef relation_buffer<int, int, int> i3;
i3 buf2{i3::reg_type{®, ®, ®}};
buf2.add(i3::outer_type{4, 5, 6});
buf2.add(i3::outer_type{6, 7, 4});
BOOST_CHECK(reg.get_or_add(6) == buf2[0][2]);
BOOST_CHECK(reg.get_or_add(6) == buf2[1][0]);
stringstream ss;
buf2.to_csv(ss);
BOOST_CHECK(ss.str() == "4,5,6\n6,7,4\n");
}
BOOST_AUTO_TEST_CASE( field_volumes ){
registrar<int> r0;
registrar<int> r1;
r1.get_or_add(0);
registrar<int> r2;
r2.get_or_add(0);
r2.get_or_add(1);
registrar<int> r3;
r3.get_or_add(0);
r3.get_or_add(1);
r3.get_or_add(2);
// relations without fields have 1 adt
typedef relation_buffer<int, int> i2;
BOOST_CHECK(i2(i2::reg_type{&r3, &r3}).field_volume() == 1);
BOOST_CHECK(i2(i2::reg_type{&r0, &r2}).field_volume() == 1);
BOOST_CHECK(i2(i2::reg_type{&r1, &r0}).field_volume() == 1);
// relations with 1 field have the volume of that field's registrar
typedef relation_buffer<int, int, int> i3;
BOOST_CHECK(i3(i3::reg_type{&r1, &r2, &r0}).field_volume() == 0);
BOOST_CHECK(i3(i3::reg_type{&r0, &r2, &r1}).field_volume() == 1);
BOOST_CHECK(i3(i3::reg_type{&r3, &r3, &r2}).field_volume() == 2);
BOOST_CHECK(i3(i3::reg_type{&r1, &r0, &r3}).field_volume() == 3);
// relations with 2 fields multiply their volumes
typedef relation_buffer<int, int, int, int> i4;
BOOST_CHECK(i4(i4::reg_type{&r1, &r2, &r3, &r3}).field_volume() == 9);
BOOST_CHECK(i4(i4::reg_type{&r1, &r2, &r0, &r3}).field_volume() == 0);
BOOST_CHECK(i4(i4::reg_type{&r1, &r2, &r1, &r2}).field_volume() == 2);
// all fieldless relations index to volume 0
i2 buf2(i2::reg_type{&r2, &r1});
buf2.add(i2::outer_type{0, 0});
buf2.add(i2::outer_type{1, 0});
BOOST_CHECK(buf2.index_volume(0) == 0);
BOOST_CHECK(buf2.index_volume(1) == 0);
// identical field indices cause the same index volume
i3 buf3(i3::reg_type{&r3, &r3, &r1});
buf3.add(i3::outer_type{0, 2, 0});
buf3.add(i3::outer_type{2, 0, 0});
BOOST_CHECK(buf3.index_volume(0) == buf3.index_volume(1));
i4 buf4(i4::reg_type{&r1, &r2, &r3, &r3});
buf4.add(i4::outer_type{0, 0, 2, 0});
buf4.add(i4::outer_type{0, 0, 0, 2});
buf4.add(i4::outer_type{0, 0, 2, 1});
buf4.add(i4::outer_type{0, 1, 0, 2});
buf4.add(i4::outer_type{0, 1, 2, 0});
BOOST_CHECK(buf4.index_volume(0) == buf4.index_volume(4));
BOOST_CHECK(buf4.index_volume(1) == buf4.index_volume(3));
    // index volumes increase, with registrars further to the left contributing more
typedef relation_buffer<int, int, int, int, int> i5;
i5 buf5(i5::reg_type{&r3, &r3, &r3, &r3, &r3});
for(int x=0; x<3*3*3*3*3; x++){
buf5.add(i5::outer_type{x/81, (x%81)/27, (x%27)/9, (x%9)/3, x%3});
}
bool pass=true;
for(unsigned i=0; pass && i<buf5.size(); ++i){
pass = buf5.index_volume(i) == (buf5[i][2]*9 + buf5[i][3]*3 + buf5[i][4]);
}
BOOST_CHECK(pass);
}
BOOST_AUTO_TEST_CASE( registrar_groups ){
typedef registrar_group<int, int, int> rg_t;
typedef relation_buffer<int, int> bi2;
rg_t group;
bi2 b01(group.select<0, 1>());
b01.add(bi2::outer_type{99, 100});
BOOST_CHECK(std::get<0>(group.group).size() == 1);
BOOST_CHECK(std::get<1>(group.group).size() == 1);
bi2 b10(group.select<1, 0>());
b10.add(bi2::outer_type{99, 99});
BOOST_CHECK(std::get<0>(group.group).size() == 1);
BOOST_CHECK(std::get<1>(group.group).size() == 2);
BOOST_CHECK(std::get<2>(group.group).size() == 0);
bi2 b00(group.select<0, 0>());
b10.add(bi2::outer_type{42, 42});
BOOST_CHECK(std::get<0>(group.group).size() == 2);
typedef relation_buffer<int, int, int, int> bi4;
bi4 b0222(group.select<0, 2, 2, 2>());
b0222.add(bi4::outer_type{1001, 1002, 1003, 1004});
array<size_t, 3> arr = group.volumes();
BOOST_CHECK(arr[0] = 3);
BOOST_CHECK(arr[1] = 2);
BOOST_CHECK(arr[2] = 3);
// these tests will fail to compile if something is wrong
typedef registrar_group<int, char, string, char> rg_t2;
rg_t2 rg;
relation_buffer<int, string> a(rg.select<0, 2>());
relation_buffer<string, int> b(rg.select<2, 0>());
relation_buffer<string, char> c(rg.select<2, 1>());
relation_buffer<string, char> d(rg.select<2, 3>());
}
BOOST_AUTO_TEST_CASE( multiple_domains ){
typedef relation_buffer<string, string, string> s3;
registrar<string> a;
registrar<string> b;
registrar<string> c;
s3 buf(s3::reg_type{&a, &b, &c});
buf.add(s3::outer_type{"cat","sat","mat"});
BOOST_CHECK(a.size() == 1);
BOOST_CHECK(b.size() == 1);
BOOST_CHECK(c.size() == 1);
buf.add(s3::outer_type{"cat", "sat", "sat"});
buf.add(s3::outer_type{"cat", "cat", "cat"});
BOOST_CHECK(a.size() == 1);
BOOST_CHECK(b.size() == 2);
BOOST_CHECK(c.size() == 3);
typedef relation_buffer<int, char, string> ics;
registrar<int> ri;
registrar<char> rc;
registrar<string> rs;
ics buf2(ics::reg_type{&ri, &rc, &rs});
buf2.add(ics::outer_type{42, 'X', "Hello World!"});
BOOST_CHECK(ri.size() == 1);
BOOST_CHECK(rc.size() == 1);
BOOST_CHECK(rs.size() == 1);
buf2.add(ics::outer_type{42, '.', "Hello World!"});
BOOST_CHECK(ri.size() == 1);
BOOST_CHECK(rc.size() == 2);
BOOST_CHECK(rs.size() == 1);
stringstream ss;
buf2.to_csv(ss);
BOOST_CHECK(ss.str() == "42,X,Hello World!\n42,.,Hello World!\n");
}
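// tuple_filler recursively assigns the same value to every element of a tuple; csv_test uses it to point every registrar slot at a single registrar.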
template<typename T, typename Tup, unsigned I>
struct tuple_filler {
inline static void fill(T t, Tup& tuple){
std::get<I-1>(tuple) = t;
tuple_filler<T, Tup, I-1>::fill(t, tuple);
}
};
template<typename T, typename Tup>
struct tuple_filler<T, Tup, 0>{
inline static void fill(T t, Tup& tuple){
}
};
template<typename T, typename...Ts>
void csv_test(const string& csv_path){
typedef relation_buffer<T, Ts...> RT;
    // Read the CSV file into a relation, then write it back out
registrar<T> reg;
typename RT::reg_type reg_array;
tuple_filler<registrar<T>*, typename RT::reg_type, sizeof...(Ts)+1>::fill(®, reg_array);
RT buf(reg_array);
buf.from_csv(csv_path);
stringstream ss;
buf.to_csv(ss);
    // Read the file straight into a string
ifstream ifs(csv_path);
string contents((istreambuf_iterator<char>(ifs)), (istreambuf_iterator<char>())); // most vexing parse
auto idx = contents.find("\n\n");//double newlines are removed by the relation
while(idx != string::npos){
contents.replace(idx, 2, "\n");
idx = contents.find("\n\n");
}
BOOST_CHECK(ss.str() == contents);//make sure they are the same
}
BOOST_AUTO_TEST_CASE( csv_io ){
csv_test<string, string>("example/csv/foo.csv");
csv_test<int, int, int, int>("example/csv/bar.csv");
csv_test<char, char, char>("example/csv/baz.csv");
}
BOOST_AUTO_TEST_SUITE_END()
| {"hexsha": "630f633b35cc39c20ffd2b19de5feed1ef8f444c", "size": 8393, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/cpp/test_relation_buffer.cpp", "max_stars_repo_name": "cauliflower-cflr/cauliflower", "max_stars_repo_head_hexsha": "dc8ed39c60bcd8fd700d9da48aba124dd9d6d115", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3.0, "max_stars_repo_stars_event_min_datetime": "2019-04-28T13:34:43.000Z", "max_stars_repo_stars_event_max_datetime": "2020-11-08T15:35:58.000Z", "max_issues_repo_path": "src/test/cpp/test_relation_buffer.cpp", "max_issues_repo_name": "cauliflower-cflr/cauliflower", "max_issues_repo_head_hexsha": "dc8ed39c60bcd8fd700d9da48aba124dd9d6d115", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/cpp/test_relation_buffer.cpp", "max_forks_repo_name": "cauliflower-cflr/cauliflower", "max_forks_repo_head_hexsha": "dc8ed39c60bcd8fd700d9da48aba124dd9d6d115", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.3242009132, "max_line_length": 110, "alphanum_fraction": 0.5685690456, "num_tokens": 2548} |
"""
Test cases for functions/classes in otmann.py
-- [email protected]
"""
# pylint: disable=no-member
# pylint: disable=invalid-name
import numpy as np
# Local imports
from . import otmann
from .unittest_neural_network import generate_cnn_architectures, \
generate_mlp_architectures
from ..utils.ancillary_utils import get_list_of_floats_as_str
from ..utils.base_test_class import BaseTestClass, execute_tests
_TOL = 1e-5
class TransportNNDistanceComputerTestCase(BaseTestClass):
""" Contains unit tests for the TransportNNDistanceComputer class. """
def __init__(self, *args, **kwargs):
""" Constructor. """
super(TransportNNDistanceComputerTestCase, self).__init__(*args, **kwargs)
self.non_assignment_penalty = 1
cnn_layer_labels, label_mismatch_penalty = \
otmann.get_cnn_layer_label_mismatch_penalties(self.non_assignment_penalty)
self.tp_comp = otmann.OTMANNDistanceComputer(cnn_layer_labels,
label_mismatch_penalty, self.non_assignment_penalty,
otmann.CNN_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
dflt_mislabel_coeffs=1.0, dflt_struct_coeffs=1.0,
dflt_dist_type='lp-emd')
self.cnns = generate_cnn_architectures()
def test_cnn_label_mismatch_penalties(self):
""" Unit test for the label mismatch penalty of a CNN. """
self.report('Testing generation of label mismatches for a CNN. ')
cnn_layer_labels, label_mismatch_penalty = \
otmann.get_cnn_layer_label_mismatch_penalties(self.non_assignment_penalty,
max_conv_size=9)
self.report('cnn_layer_labels: %s'%(str(cnn_layer_labels)), 'test_result')
self.report('cnn mismatch penalties: \n%s'%(str(np.round(label_mismatch_penalty, 3))),
'test_result')
assert np.all(label_mismatch_penalty == label_mismatch_penalty.T)
assert np.all(np.diag(label_mismatch_penalty) == 0)
def test_mlp_label_mismatch_penalties(self):
""" Unit test for the label mismatch penalty of an MLP. """
self.report('Testing generation of label mismatches for a MLP. ')
mlp_layer_labels, label_mismatch_penalty = \
otmann.get_mlp_layer_label_mismatch_penalties(self.non_assignment_penalty,
'reg')
self.report('mlp_layer_labels: %s'%((str(mlp_layer_labels))), 'test_result')
self.report('mlp mismatch penalties: \n%s'%(str(np.round(label_mismatch_penalty, 3))),
'test_result')
assert np.all(label_mismatch_penalty == label_mismatch_penalty.T)
assert np.all(np.diag(label_mismatch_penalty) == 0)
@classmethod
def _is_cost_matrix_for_same_networks(cls, cost_matrix):
""" Returns true if it is the cost matrix for the same network. """
return np.all(np.diag(cost_matrix) == 0) and np.all(cost_matrix == cost_matrix.T)
@classmethod
def _has_corresponding_layers(cls, cost_matrix):
""" Returns true if one network has a corresponding layer in the other and vice
versa. """
ret = True
for row_idx in range(cost_matrix.shape[0]):
ret = ret and np.any(cost_matrix[row_idx, :] == 0)
for col_idx in range(cost_matrix.shape[1]):
ret = ret and np.any(cost_matrix[:, col_idx] == 0)
return ret
def test_mislabel_cost_matrix(self):
""" Tests the mislabel cost matrix for specific pairs of neural networks. """
self.report('Testing generation of label cost matrices for specific cnns. ')
num_cnns = len(self.cnns)
for i in range(num_cnns):
for j in range(i+1, num_cnns):
cnn_i = self.cnns[i]
cnn_j = self.cnns[j]
mislabel_cost_matrix = self.tp_comp.get_mislabel_cost_matrix(cnn_i, cnn_j)
assert mislabel_cost_matrix.shape[0] == cnn_i.num_layers
assert mislabel_cost_matrix.shape[1] == cnn_j.num_layers
if i == j:
assert self._is_cost_matrix_for_same_networks(mislabel_cost_matrix)
if i == 0 and j == 1:
          # These two networks were designed so that every row and every column of the
          # cost matrix contains a zero.
assert self._has_corresponding_layers(mislabel_cost_matrix)
if (i == 2 and j == 3) or (i == 0 and j == 1) or (i == 1 and j == 6):
self.report('Mislabel cost matrix for cnn-%d and cnn-%d:\n%s'%(i, j,
str(np.round(mislabel_cost_matrix, 3))), 'test_result')
def test_connectivity_cost_matrix(self):
""" Tests the connectivity cost matrix for specific pairs of neural networks. """
self.report('Testing generation of connectivity cost matrices for specific cnns.')
num_cnns = len(self.cnns)
for i in range(num_cnns):
for j in range(i, num_cnns):
cnn_i = self.cnns[i]
cnn_j = self.cnns[j]
struct_cost_matrix = self.tp_comp.get_struct_cost_matrix(cnn_i, cnn_j)
assert struct_cost_matrix.shape[0] == cnn_i.num_layers
assert struct_cost_matrix.shape[1] == cnn_j.num_layers
if i == j:
assert self._is_cost_matrix_for_same_networks(struct_cost_matrix)
if i == 0 and j == 1:
          # These two networks were designed so that every row and every column of the
          # cost matrix contains a zero.
assert self._has_corresponding_layers(struct_cost_matrix)
if (i == 2 and j == 3) or (i == 0 and j == 1) or (i == 1 and j == 6):
self.report('Structural cost matrix for cnn-%d and cnn-%d:\n%s'%(i, j,
str(np.round(struct_cost_matrix, 3))), 'test_result')
def test_ot_cost_matrix(self):
""" Tests the OT cost matrix for specific pairs of neural networks. """
self.report('Testing generation of OT cost matrices for specific cnns.')
nns = self.cnns
num_nns = len(nns)
for i in range(num_nns):
for j in range(i, num_nns):
nn_i = nns[i]
nn_j = nns[j]
mislabel_cost_matrix = self.tp_comp.get_mislabel_cost_matrix(nn_i, nn_j)
struct_cost_matrix = self.tp_comp.get_struct_cost_matrix(nn_i, nn_j)
ot_cost_matrix = self.tp_comp.get_ot_cost_matrix(mislabel_cost_matrix,
struct_cost_matrix, 1, 0.1, 1, None)
assert ot_cost_matrix.shape[0] == nn_i.num_layers + 1
assert ot_cost_matrix.shape[1] == nn_j.num_layers + 1
if i == j:
assert self._is_cost_matrix_for_same_networks(ot_cost_matrix)
if i == 0 and j == 1:
          # These two networks were designed so that every row and every column of the
          # cost matrix contains a zero.
assert self._has_corresponding_layers(ot_cost_matrix)
if (i == 2 and j == 3) or (i == 0 and j == 1) or (i == 1 and j == 6):
self.report('OT cost matrix for cnn-%d and cnn-%d:\n%s'%(i, j,
str(np.round(ot_cost_matrix, 3))), 'test_result')
@classmethod
def _get_dist_type_abbr(cls, dist_type):
""" Shortens distance type. """
if dist_type == 'lp-norm-by-max':
return 'lnbm'
else:
return dist_type
def _test_dist_comp_for_single_conn_coeff(self, nns, dist_types, tp_comp):
""" Tests distance computation for a single connectivity coefficient. """
num_nns = len(nns)
for i in range(num_nns):
for j in range(i, num_nns):
nn_i = nns[i]
nn_j = nns[j]
dists = {}
for dt in dist_types:
dists[dt] = tp_comp(nn_i, nn_j, dist_type=dt)
res_str = ' '.join(['%s=%s'%(self._get_dist_type_abbr(key),
get_list_of_floats_as_str(val))
for key, val in dists.items()])
self.report('(i,j)=(%d,%d) %s'%(i, j, res_str), 'test_result')
def _test_dist_comp_for_multiple_coeffs(self, nns, dist_types, mislabel_coeffs,
struct_coeffs, tp_comp):
""" Tests distance computation for a single connectivity coefficient. """
num_nns = len(nns)
for i in range(num_nns):
for j in range(i, num_nns):
nn_i = nns[i]
nn_j = nns[j]
dists = {}
for dt in dist_types:
dists[dt] = tp_comp(nn_i, nn_j, dist_type=dt, mislabel_coeffs=mislabel_coeffs,
struct_coeffs=struct_coeffs)
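          # one distance per distance-type component (e.g. 'lp-emd' has two) and per coefficient pair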
num_dists = len(dt.split('-')) * len(mislabel_coeffs)
assert len(dists[dt]) == num_dists
res_str = ' '.join(['%s=%s'%(self._get_dist_type_abbr(key),
get_list_of_floats_as_str(val))
for key, val in dists.items()])
self.report('(i,j)=(%d,%d) %s'%(i, j, res_str), 'test_result')
def test_cnn_distance_computation(self):
""" Tests the computation of the distance for CNNs. """
struct_coeffs = [0.1, 0.4]
mislabel_coeffs = [1.0] * len(struct_coeffs)
dist_types = ['lp-emd', 'lp', 'emd']
self.report('Testing distance computation for specific cnns with default coeffs.')
self._test_dist_comp_for_single_conn_coeff(self.cnns, dist_types, self.tp_comp)
# With multiple conn_coeffs
self.report('Testing distance computation for specific cnns with conn_coeffs=%s.'%(
struct_coeffs))
self._test_dist_comp_for_multiple_coeffs(self.cnns, dist_types, mislabel_coeffs,
struct_coeffs, self.tp_comp)
def test_mlp_distance_computation(self):
""" Tests the computation of the distance for CNNs. """
self.report('Testing distance computation for specific mlps.')
# Create the transport computation object
mlp_layer_labels, label_mismatch_penalty = \
otmann.get_mlp_layer_label_mismatch_penalties(self.non_assignment_penalty,
'reg')
mlp_tp_comp = otmann.OTMANNDistanceComputer(mlp_layer_labels,
label_mismatch_penalty,
self.non_assignment_penalty,
otmann.MLP_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
dflt_mislabel_coeffs=1.0, dflt_struct_coeffs=1.0)
# Create the mlp architectures
mlps = generate_mlp_architectures()
struct_coeffs = [0.1, 0.2]
dist_types = ['lp-emd', 'lp', 'emd']
mislabel_coeffs = [1.0] * len(struct_coeffs)
self.report('Testing distance computation for specific mlps with default coeffs.')
self._test_dist_comp_for_single_conn_coeff(mlps, dist_types, mlp_tp_comp)
self.report('Testing distance computation for specific mlps with conn_coeffs=%s.'%(
struct_coeffs))
self._test_dist_comp_for_multiple_coeffs(mlps, dist_types, mislabel_coeffs,
struct_coeffs, mlp_tp_comp)
class DistProdNNKernelTestCase(BaseTestClass):
""" Unit tests for the Transport NNKernels. """
def __init__(self, *args, **kwargs):
""" Constructor. """
super(DistProdNNKernelTestCase, self).__init__(*args, **kwargs)
self.non_assignment_penalty = 1
cnn_layer_labels, label_mismatch_penalty = \
otmann.get_cnn_layer_label_mismatch_penalties(self.non_assignment_penalty)
self.all_layer_labels = cnn_layer_labels
self.label_mismatch_penalty = label_mismatch_penalty
self.tp_comp = otmann.OTMANNDistanceComputer(cnn_layer_labels,
label_mismatch_penalty,
self.non_assignment_penalty,
otmann.CNN_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES
)
self.mislabel_coeffs = [2.0, 2.0, 1.0, 1.0, 1.0]
self.struct_coeffs = [0.25, 0.5, 1.0, 2.0, 4.0]
self.lp_betas = [1e-6] * len(self.struct_coeffs)
self.emd_betas = [1] * len(self.struct_coeffs)
self.scale = 1
self.cnns = generate_cnn_architectures()
def test_instantiation(self):
""" Testing instantiation. """
self.report('Testing instantiation of DistProdNNKernelTestCase and computation ' +
'for specific networks.')
dist_type_vals = ['lp', 'emd', 'lp-emd']
all_kernels = []
for dist_type in dist_type_vals:
if dist_type == 'lp':
betas = self.lp_betas
elif dist_type == 'emd':
betas = self.emd_betas
else:
betas = [j for i in zip(self.lp_betas, self.emd_betas) for j in i]
tp_kernel = otmann.get_otmann_kernel_from_params('prod',
self.all_layer_labels, self.label_mismatch_penalty,
self.non_assignment_penalty,
otmann.CNN_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
self.mislabel_coeffs, self.struct_coeffs, dist_type,
betas, self.scale)
cnn_K = tp_kernel(self.cnns)
all_kernels.append(cnn_K)
cnn_eig_vals, _ = np.linalg.eig(cnn_K)
self.report('dist-type: %s, eigvals: %s.'%(dist_type,
get_list_of_floats_as_str(sorted(cnn_eig_vals))))
self.report('%s transport kernel:\n%s'%(dist_type,
str(np.round(cnn_K, 3))), 'test_result')
assert cnn_K.shape == (len(self.cnns), len(self.cnns))
assert np.all(np.diag(cnn_K) == 1)
# Check if it is in fact the product
if 'lp' in dist_type_vals and 'emd' in dist_type_vals and 'lp-emd' in dist_type_vals:
lp_kernel = all_kernels[dist_type_vals.index('lp')]
emd_kernel = all_kernels[dist_type_vals.index('emd')]
lpemd_kernel = all_kernels[dist_type_vals.index('lp-emd')]
assert np.linalg.norm(lpemd_kernel - lp_kernel * emd_kernel) < _TOL
def test_kernel_computation(self):
""" Testing computation of the lp distance. """
self.report('Testing computed kernel values for specific cnns.')
betas = [0.0001]
scale = 2.1
struct_coeffs = 1.0
mislabel_coeffs = 1.0
dist_type = 'lp'
tp_comp = otmann.OTMANNDistanceComputer(self.all_layer_labels,
self.label_mismatch_penalty, self.non_assignment_penalty,
otmann.CNN_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
dflt_mislabel_coeffs=mislabel_coeffs,
dflt_struct_coeffs=struct_coeffs,
dflt_dist_type=dist_type)
tp_kernel = otmann.get_otmann_kernel_from_params('prod',
self.all_layer_labels, self.label_mismatch_penalty,
self.non_assignment_penalty, otmann.CNN_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
mislabel_coeffs, struct_coeffs, dist_type, betas, scale)
cnn_dists = tp_comp(self.cnns, self.cnns)
cnn_kernel = tp_kernel(self.cnns)
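    # with a single coefficient pair, the product kernel reduces to scale * exp(-beta * distance)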
diff = np.linalg.norm(scale * np.exp(-cnn_dists[0] * betas[0]) - cnn_kernel)
assert diff < np.linalg.norm(cnn_kernel) * 1e-6
class DistSumNNKernelTestCase(BaseTestClass):
""" Unit tests for the Transport NNKernels. """
def __init__(self, *args, **kwargs):
""" Constructor. """
super(DistSumNNKernelTestCase, self).__init__(*args, **kwargs)
self.non_assignment_penalty = 1
mlp_layer_labels, label_mismatch_penalty = \
otmann.get_mlp_layer_label_mismatch_penalties(self.non_assignment_penalty,
'reg')
self.all_layer_labels = mlp_layer_labels
self.label_mismatch_penalty = label_mismatch_penalty
self.tp_comp = otmann.OTMANNDistanceComputer(mlp_layer_labels,
label_mismatch_penalty,
self.non_assignment_penalty,
otmann.MLP_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES
)
self.mislabel_coeffs = [2.0, 2.0, 1.0, 1.0, 1.0]
self.struct_coeffs = [0.25, 0.5, 1.0, 2.0, 4.0]
self.lp_betas = [1e-6] * len(self.struct_coeffs)
self.emd_betas = [1] * len(self.struct_coeffs)
self.mlps = generate_mlp_architectures()
def test_instantiation_and_computation(self):
""" Testing instantiation. """
self.report('Testing instantiation of DistSumNNKernelTestCase and computation ' +
'for specific networks.')
dist_type_vals = ['lp', 'emd', 'lp-emd']
all_kernels = []
for dist_type in dist_type_vals:
if dist_type == 'lp':
betas = self.lp_betas
scales = [1]
elif dist_type == 'emd':
betas = self.emd_betas
scales = [1]
else:
betas = [j for i in zip(self.lp_betas, self.emd_betas) for j in i]
scales = [1, 1]
tp_kernel = otmann.get_otmann_kernel_from_params('sum',
self.all_layer_labels, self.label_mismatch_penalty,
self.non_assignment_penalty,
otmann.MLP_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
self.mislabel_coeffs, self.struct_coeffs, dist_type, betas, scales)
nn_K = tp_kernel(self.mlps)
nn_eig_vals, _ = np.linalg.eig(nn_K)
self.report('dist-type: %s, eigvals: %s.'%(dist_type,
get_list_of_floats_as_str(sorted(nn_eig_vals))))
assert nn_K.shape == (len(self.mlps), len(self.mlps))
self.report('%s transport kernel:\n%s'%(dist_type,
str(np.round(nn_K, 3))), 'test_result')
assert np.all(np.diag(nn_K) == sum(scales))
all_kernels.append(nn_K)
# Check if it is in fact the sum
if 'lp' in dist_type_vals and 'emd' in dist_type_vals and 'lp-emd' in dist_type_vals:
lp_kernel = all_kernels[dist_type_vals.index('lp')]
emd_kernel = all_kernels[dist_type_vals.index('emd')]
lpemd_kernel = all_kernels[dist_type_vals.index('lp-emd')]
assert np.linalg.norm(lpemd_kernel - lp_kernel - emd_kernel) < _TOL
def test_sum_product_equivalence(self):
""" Unit-test for testing that both kernels compute the same thing in certain cases.
"""
dist_type_vals = ['lp', 'emd']
for dist_type in dist_type_vals:
if dist_type == 'lp':
betas = self.lp_betas
scales = [1]
elif dist_type == 'emd':
betas = self.emd_betas
scales = [1]
sum_kernel = otmann.get_otmann_kernel_from_params('sum',
self.all_layer_labels, self.label_mismatch_penalty,
self.non_assignment_penalty,
otmann.MLP_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
self.mislabel_coeffs, self.struct_coeffs, dist_type, betas, scales)
prod_kernel = otmann.get_otmann_kernel_from_params('prod',
self.all_layer_labels, self.label_mismatch_penalty,
self.non_assignment_penalty,
otmann.MLP_STRUCTURAL_PENALTY_GROUPS,
otmann.PATH_LENGTH_TYPES,
self.mislabel_coeffs, self.struct_coeffs, dist_type, betas, scales)
sum_nn_K = sum_kernel(self.mlps)
prod_nn_K = prod_kernel(self.mlps)
assert np.linalg.norm(sum_nn_K - prod_nn_K) < _TOL
if __name__ == '__main__':
execute_tests()
| {"hexsha": "d28f7f24278b313a7a2eda3b820a9b07f82b1a6d", "size": 19090, "ext": "py", "lang": "Python", "max_stars_repo_path": "dragonfly/nn/unittest_otmann.py", "max_stars_repo_name": "hase1128/dragonfly", "max_stars_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 675, "max_stars_repo_stars_event_min_datetime": "2018-08-23T17:30:46.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T18:37:23.000Z", "max_issues_repo_path": "dragonfly/nn/unittest_otmann.py", "max_issues_repo_name": "hase1128/dragonfly", "max_issues_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 62, "max_issues_repo_issues_event_min_datetime": "2018-11-30T23:40:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-10T19:47:27.000Z", "max_forks_repo_path": "dragonfly/nn/unittest_otmann.py", "max_forks_repo_name": "hase1128/dragonfly", "max_forks_repo_head_hexsha": "4be7e4c539d3edccc4d243ab9f972b1ffb0d9a5c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 349, "max_forks_repo_forks_event_min_datetime": "2018-09-10T19:04:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T13:10:45.000Z", "avg_line_length": 47.2524752475, "max_line_length": 90, "alphanum_fraction": 0.6422734416, "include": true, "reason": "import numpy", "num_tokens": 4747} |
"""
Tests for datatype and galaxycluster
"""
from numpy.testing import assert_raises, assert_equal
from clmm import GCData
from clmm import Cosmology
def test_init():
gcdata = GCData()
assert_equal(None, gcdata.meta['cosmo'])
def test_update_cosmo():
# Define inputs
cosmo1 = Cosmology(H0=70.0, Omega_dm0=0.3-0.045, Omega_b0=0.045)
desc1 = cosmo1.get_desc()
gcdata = GCData()
    # check that it has __str__ and __repr__
gcdata.__str__()
gcdata.__repr__()
# manual update
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
# check that adding cosmo metadata manually is forbidden
assert_raises(ValueError, gcdata.meta.__setitem__, 'cosmo', None)
assert_raises(ValueError, gcdata.meta.__setitem__, 'cosmo', cosmo1)
# update_cosmo funcs
# input_cosmo=None, data_cosmo=None
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, None, overwrite=False)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, None, overwrite=True)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(None, overwrite=False)
assert_equal(None, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(None, overwrite=True)
assert_equal(None, gcdata.meta['cosmo'])
# input_cosmo!=None, data_cosmo=None
gcdata = GCData()
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
# input_cosmo=data_cosmo!=None
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=True)
assert_equal(desc1, gcdata.meta['cosmo'])
# input_cosmo(!=None) != data_cosmo(!=None)
cosmo2 = Cosmology(H0=60.0, Omega_dm0=0.3-0.045, Omega_b0=0.045)
desc2 = cosmo2.get_desc()
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
assert_raises(TypeError, gcdata.update_cosmo_ext_valid, gcdata, cosmo2, overwrite=False)
assert_raises(TypeError, gcdata.update_cosmo_ext_valid, gcdata, cosmo2)
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo_ext_valid(gcdata, cosmo2, overwrite=True)
assert_equal(desc2, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
gcdata.update_cosmo(cosmo1, overwrite=False)
assert_equal(desc1, gcdata.meta['cosmo'])
gcdata = GCData()
gcdata.update_cosmo(cosmo1)
assert_raises(TypeError, gcdata.update_cosmo, cosmo2, overwrite=False)
assert_raises(TypeError, gcdata.update_cosmo, cosmo2)
# test_creator = 'Mitch'
# test_creator_diff = 'Witch'
# test_dict = {'test%d'%i:True for i in range(3)}
# test_dict_diff = {'test%d'%i:False for i in range(3)}
# test_dict_sub = {'test%d'%i:True for i in range(2)}
# test_table = []
# test_data = GCData(test_creator, test_dict, test_table)
# test_data_diff = GCData(test_creator, test_dict_diff, test_table)
# def test_check_subdict():
#
# assert check_subdict(test_dict_sub, test_dict)
# assert not check_subdict(test_dict, test_dict_sub)
# assert not check_subdict(test_dict_sub, test_dict_diff)
#
# def test_find_in_datalist():
#
# tst.assert_equal([test_data], find_in_datalist(test_dict, [test_data]))
# tst.assert_equal([test_data], find_in_datalist(test_dict_sub, [test_data]))
# tst.assert_equal([], find_in_datalist(test_dict_diff, [test_data]))
#
# tst.assert_equal([test_data], find_in_datalist(test_dict, [test_data], exact=True))
# tst.assert_equal([], find_in_datalist(test_dict_sub, [test_data], exact=True))
# tst.assert_equal([], find_in_datalist(test_dict_diff, [test_data], exact=True))
# def test_find_data():
#
# gc = GalaxyCluster('test_cluster', test_data)
#
# tst.assert_equal([], gc.find_data(test_creator_diff, test_dict))
#
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict))
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict_sub))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_diff))
#
# tst.assert_equal([test_data], gc.find_data(test_creator, test_dict, exact=True))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_sub, exact=True))
# tst.assert_equal([], gc.find_data(test_creator, test_dict_diff, exact=True))
# def test_add_data():
# gc = GalaxyCluster('test_cluster')
# tst.assert_raises(TypeError, gc.add_data, '')
# tst.assert_raises(TypeError, gc.add_data, '', force=True)
# tst.assert_equal(None, gc.add_data(test_data, force=True))
# gc = GalaxyCluster('test_cluster')
# tst.assert_equal(None, gc.add_data(test_data))
# tst.assert_equal(None, gc.add_data(test_data_diff))
# tst.assert_raises(ValueError, gc.add_data, test_data)
# tst.assert_equal(None, gc.add_data(test_data, force=True))
#
# def test_remove_data():
#
# gc = GalaxyCluster('test_cluster', test_data)
# tst.assert_raises(ValueError, gc.remove_data, test_creator_diff, test_dict)
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict_sub)
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict_diff)
# tst.assert_equal(None, gc.remove_data(test_creator, test_dict))
# tst.assert_raises(ValueError, gc.remove_data, test_creator, test_dict)
#
# def test_read_GC():
# pass
# def test_write_GC():
# pass
| {"hexsha": "4503f7a9e776a39f9049f07e6e7bfb6fcb3a25ea", "size": 6144, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_gcdata.py", "max_stars_repo_name": "96RadhikaJadhav/CLMM", "max_stars_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_gcdata.py", "max_issues_repo_name": "96RadhikaJadhav/CLMM", "max_issues_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_gcdata.py", "max_forks_repo_name": "96RadhikaJadhav/CLMM", "max_forks_repo_head_hexsha": "cd0508f82f9a6a4692fe785277ac25c73e89d0d7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.1085714286, "max_line_length": 92, "alphanum_fraction": 0.7195638021, "include": true, "reason": "from numpy", "num_tokens": 1663} |
# Copyright 2019 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Functions to train and evaluate."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os.path
import cv2
import numpy as np
from skimage.measure import compare_ssim
from src.utils import preprocess
def batch_psnr(gen_frames, gt_frames):
"""Computes PSNR for a batch of data."""
  if gen_frames.ndim == 3:
    axis = (1, 2)
  elif gen_frames.ndim == 4:
    axis = (1, 2, 3)
  else:
    raise ValueError('Expected 3D or 4D frame batches, got ndim=%d' % gen_frames.ndim)
x = np.int32(gen_frames)
y = np.int32(gt_frames)
num_pixels = float(np.size(gen_frames[0]))
mse = np.sum((x - y)**2, axis=axis, dtype=np.float32) / num_pixels
psnr = 20 * np.log10(255) - 10 * np.log10(mse)
return np.mean(psnr)
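# Illustrative only (added; not part of the original pipeline): batch_psnr expects
# uint8-range arrays of identical shape, e.g. (batch, height, width, channels).
def _batch_psnr_example():
  rng = np.random.RandomState(0)
  gt = rng.randint(0, 256, size=(2, 8, 8, 3))
  gen = np.clip(gt + rng.randint(-5, 6, size=gt.shape), 0, 255)
  return batch_psnr(gen, gt)  # larger values mean the generated frames are closer to ground truth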
def train(model, ims, real_input_flag, configs, itr):
"""Trains a model."""
ims_list = np.split(ims, configs.n_gpu)
cost = model.train(ims_list, configs.lr, real_input_flag, itr)
if configs.reverse_input:
ims_rev = np.split(ims[:, ::-1], configs.n_gpu)
cost += model.train(ims_rev, configs.lr, real_input_flag, itr)
cost = cost / 2
if itr % configs.display_interval == 0:
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
'itr: ' + str(itr))
print('training loss: ' + str(cost))
def test(model, test_input_handle, configs, save_name):
"""Evaluates a model."""
print(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S'), 'test...')
res_path = os.path.join(configs.gen_frm_dir, str(save_name))
os.mkdir(res_path)
avg_mse = 0
batch_id = 0
img_mse, ssim, psnr = [], [], []
output_length = configs.total_length - configs.input_length
for i in range(output_length):
img_mse.append(0)
ssim.append(0)
psnr.append(0)
real_input_flag_zero = np.zeros((configs.batch_size, output_length - 1,
configs.img_width // configs.patch_size,
configs.img_width // configs.patch_size,
configs.patch_size**2 * configs.img_channel))
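  # Note (added): the original script evaluates a fixed number of 90 test batches below;
  # adjust this constant if your test_input_handle provides a different number of batches.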
for i in range(90):
test_ims = test_input_handle.__getitem__(batch_id)
batch_id = batch_id + 1
test_dat = preprocess.reshape_patch(test_ims, configs.patch_size)
test_dat = np.split(test_dat, configs.n_gpu)
img_gen = model.test(test_dat, real_input_flag_zero)
# Concat outputs of different gpus along batch
img_gen = np.concatenate(img_gen)
img_gen = preprocess.reshape_patch_back(img_gen, configs.patch_size)
img_out = img_gen[:, -output_length:]
target_out = test_ims[:, -output_length:]
# MSE per frame
for i in range(output_length):
x = target_out[:, i]
gx = img_out[:, i]
gx = np.maximum(gx, 0)
gx = np.minimum(gx, 1)
mse = np.square(x - gx).sum()
img_mse[i] += mse
avg_mse += mse
# for b in range(configs.batch_size):
# ssim[i] += compare_ssim(x[b], gx[b], multichannel=True)
x = np.uint8(x * 255)
gx = np.uint8(gx * 255)
psnr[i] += batch_psnr(gx, x)
# save prediction examples
if batch_id <= configs.num_save_samples:
path = os.path.join(res_path, str(batch_id))
os.mkdir(path)
for i in range(configs.total_length):
if (i + 1) < 10:
name = 'gt0' + str(i + 1) + '.png'
else:
name = 'gt' + str(i + 1) + '.png'
file_name = os.path.join(path, name)
img_gt = np.uint8(test_ims[0, i] * 255)
cv2.imwrite(file_name, img_gt)
for i in range(output_length):
if (i + configs.input_length + 1) < 10:
name = 'pd0' + str(i + configs.input_length + 1) + '.png'
else:
name = 'pd' + str(i + configs.input_length + 1) + '.png'
file_name = os.path.join(path, name)
img_pd = img_gen[0, i]
img_pd = np.maximum(img_pd, 0)
img_pd = np.minimum(img_pd, 1)
img_pd = np.uint8(img_pd * 255)
cv2.imwrite(file_name, img_pd)
avg_mse = avg_mse / (batch_id * configs.batch_size * configs.n_gpu)
print('mse per seq: ' + str(avg_mse))
for i in range(output_length):
print(img_mse[i] / (batch_id * configs.batch_size * configs.n_gpu))
psnr = np.asarray(psnr, dtype=np.float32) / batch_id
print('psnr per frame: ' + str(np.mean(psnr)))
for i in range(output_length):
print(psnr[i])
# ssim = np.asarray(ssim, dtype=np.float32) / (configs.batch_size * batch_id)
# print('ssim per frame: ' + str(np.mean(ssim)))
# for i in range(output_length):
# print(ssim[i])
| {"hexsha": "317e9ac178a54833b44028edd640ce6922b03712", "size": 5155, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/trainer.py", "max_stars_repo_name": "AlbaraaKhayat/e3d_lstm", "max_stars_repo_head_hexsha": "3caa74fb203fbfc0857a20088d559fec23ba8d99", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/trainer.py", "max_issues_repo_name": "AlbaraaKhayat/e3d_lstm", "max_issues_repo_head_hexsha": "3caa74fb203fbfc0857a20088d559fec23ba8d99", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/trainer.py", "max_forks_repo_name": "AlbaraaKhayat/e3d_lstm", "max_forks_repo_head_hexsha": "3caa74fb203fbfc0857a20088d559fec23ba8d99", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.3082191781, "max_line_length": 80, "alphanum_fraction": 0.6322017459, "include": true, "reason": "import numpy", "num_tokens": 1409} |
from skimage import io, transform
import glob
import os
import tensorflow as tf
import numpy as np
import time
import matplotlib.pyplot as plt
import random
plt.rcParams['font.sans-serif'] = ['SimHei']  # use the SimHei font so CJK text renders correctly
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly with this font
'''
Dataset source
@misc{e-VDS,
author = {Culurciello, Eugenio and Canziani, Alfredo},
title = {{e-Lab} Video Data Set},
howpublished = {url{https://engineering.purdue.edu/elab/eVDS/}},
year={2017}
}
'''
# training data
train_path = 'F:/10-image-set/train/'
# validation data
val_path = 'F:/10-image-set/val/'
# resize every image to 100*100
w = 100
h = 100
# three color channels
c = 3
# read images
def read_img(path):
    # collect the paths of all sub-folders under path into cate
cate = [path + x for x in os.listdir(path) if os.path.isdir(path + x)]
imgs = []
labels = []
for idx, folder in enumerate(cate):
        print('class label idx:', idx)
        print('folder path:', folder)
        i = 0
        # label every image in this folder and append it to imgs and labels
files = glob.glob(folder + '/*.jpg')
random.shuffle(files)
for im in files:
img = io.imread(im)
img = transform.resize(img, (w, h, c), mode="reflect")
imgs.append(img)
            # use the folder's index as the label
            labels.append(idx)
            # the dataset is large, so keep at most 6500 images per class
i += 1
if i == 6500:
break
return np.asarray(imgs, np.float32), np.asarray(labels, np.int32)
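# Note (added): read_img assumes one sub-folder per class under `path`; the folder's
# position in the cate list becomes the integer label (idx) assigned to all of its images.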
# load the training dataset
print("loading training dataset")
t_data, t_label = read_img(train_path)
# load the validation dataset
print("loading validation dataset")
v_data, v_label = read_img(val_path)
# shuffle the order of the training dataset
num_example = t_data.shape[0]
arr = np.arange(num_example)
np.random.shuffle(arr)
train_data = t_data[arr]
train_label = t_label[arr]
# training set
s = np.int(num_example)
x_train = train_data[:s]
y_train = train_label[:s]
# validation set
x_val = v_data
y_val = v_label
# ----------------- build the network ----------------------
# shape=[None, w, h, c]: each sample is a w*h*c tensor; the number of samples is unknown, so the first dimension is None
# feature input of the samples
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
# class labels of the samples
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')
# first convolutional layer (100->50)
# TensorFlow padding has two modes: SAME pads with zeros so the spatial size is preserved, VALID does not pad
conv1 = tf.layers.conv2d(
inputs=x,
filters=32,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool1 = tf.layers.max_pooling2d(inputs=conv1, pool_size=[2, 2], strides=2)
# second convolutional layer (50->25)
conv2 = tf.layers.conv2d(
inputs=pool1,
filters=64,
kernel_size=[5, 5],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool2 = tf.layers.max_pooling2d(inputs=conv2, pool_size=[2, 2], strides=2)
# third convolutional layer (25->12)
conv3 = tf.layers.conv2d(
inputs=pool2,
filters=128,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool3 = tf.layers.max_pooling2d(inputs=conv3, pool_size=[2, 2], strides=2)
# fourth convolutional layer (12->6)
conv4 = tf.layers.conv2d(
inputs=pool3,
filters=128,
kernel_size=[3, 3],
padding="same",
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01))
pool4 = tf.layers.max_pooling2d(inputs=conv4, pool_size=[2, 2], strides=2)
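# Shape note (added): four 2x2 max-pool layers shrink the 100x100 input to
# 50 -> 25 -> 12 -> 6, so pool4 has shape (batch, 6, 6, 128) and flattens to 6*6*128 features.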
re1 = tf.reshape(pool4, [-1, 6 * 6 * 128])
# dropout to reduce overfitting
keep_prob1 = tf.placeholder(tf.float32, name="keep_prob1")
# tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None) interface
# x: input tensor; keep_prob: float, probability that each element is kept; noise_shape: 1-D int32 tensor, shape of the randomly generated keep/drop flags
# seed: integer random seed; name: name shown in visualization
fc_drop1 = tf.nn.dropout(re1, keep_prob1)
# fully connected layer
dense1 = tf.layers.dense(inputs=fc_drop1,
units=1024,
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# dropout to reduce overfitting
keep_prob2 = tf.placeholder(tf.float32, name="keep_prob2")
# tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None) interface
# x: input tensor; keep_prob: float, probability that each element is kept; noise_shape: 1-D int32 tensor, shape of the randomly generated keep/drop flags
# seed: integer random seed; name: name shown in visualization
fc_drop2 = tf.nn.dropout(dense1, keep_prob2)
dense2 = tf.layers.dense(inputs=fc_drop2,
units=512,
activation=tf.nn.relu,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# dropout to reduce overfitting
keep_prob3 = tf.placeholder(tf.float32, name="keep_prob3")
# tf.nn.dropout(x, keep_prob, noise_shape=None, seed=None, name=None) interface
# x: input tensor; keep_prob: float, probability that each element is kept; noise_shape: 1-D int32 tensor, shape of the randomly generated keep/drop flags
# seed: integer random seed; name: name shown in visualization
fc_drop3 = tf.nn.dropout(dense2, keep_prob3)
# the final output layer uses 10 neurons, giving a 10-dimensional vector, one entry per class
logits = tf.layers.dense(inputs=fc_drop3,
units=10,
activation=None,
kernel_initializer=tf.truncated_normal_initializer(stddev=0.01),
kernel_regularizer=tf.contrib.layers.l2_regularizer(0.003))
# --------------------------- end of network ---------------------------
# compute the network loss
# logits are the outputs of the network's final layer (the inputs to the softmax); labels are the expected outputs
# this function computes the cross entropy of the final layer; TensorFlow fuses the softmax and cross-entropy computation into one op for speed
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
# gradient-based training with a fixed learning rate
# tf.train.AdamOptimizer is the Optimizer implementing the Adam algorithm
# minimize() both computes the gradients and applies them to the variables
train_op = tf.train.AdamOptimizer(learning_rate=0.00001).minimize(loss)
# evaluate the classification results
# tf.argmax(input, dimension, name=None) - input: the input tensor - dimension: axis to reduce over - name: optional name for the op
# this returns the index of the largest value among the predicted per-class scores
# correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_) yields a vector such as [True, False, True, True]
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
# cast it to floats and take the mean
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# define a function that yields the data in batches
def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
assert len(inputs) == len(targets)
if shuffle:
indices = np.arange(len(inputs))
np.random.shuffle(indices)
for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
if shuffle:
excerpt = indices[start_idx:start_idx + batch_size]
else:
excerpt = slice(start_idx, start_idx + batch_size)
yield inputs[excerpt], targets[excerpt]
# training and testing; n_epoch can be set larger
# number of training epochs
n_epoch = 15
# how many samples to take per training/validation step
batch_size = 16
# create an interactive session
sess = tf.InteractiveSession()
# load the saved model into the current session
saver = tf.train.Saver()
saver.restore(sess, 'E:/RealTimeIR/model/10-image-set')
# the parameters restored from the saved model are used to continue training
# initialize all Variables at once (only when training from scratch)
# sess.run(tf.global_variables_initializer())
# save the model
# saver = tf.train.Saver(max_to_keep=1)
max_acc = 0
# data collected for plotting
plt_x = range(0, n_epoch)
plt_a1 = []
plt_a2 = []
plt_l1 = []
plt_l2 = []
for epoch in range(n_epoch):
# training
t_start_time = time.time()
train_loss, train_acc, n_batch = 0, 0, 0
for x_train_a, y_train_a in minibatches(x_train, y_train, batch_size, shuffle=True):
_, err, ac = sess.run([train_op, loss, acc],
feed_dict={x: x_train_a, y_: y_train_a, keep_prob1: 0.3, keep_prob2: 0.3,
keep_prob3: 0.3})
train_loss += err
train_acc += ac
n_batch += 1
t_end_time = time.time()
print("train loss: %f" % (train_loss / n_batch))
plt_l1.append(train_loss / n_batch)
print("train acc: %f" % (train_acc / n_batch))
plt_a1.append(train_acc / n_batch)
print("train time: %f 分钟" % divmod(t_end_time - t_start_time, 60)[0])
# validation
    # validation is fast, so its time is not recorded
val_loss, val_acc, n_batch = 0, 0, 0
for x_val_a, y_val_a in minibatches(x_val, y_val, batch_size, shuffle=False):
err, ac = sess.run([loss, acc],
feed_dict={x: x_val_a, y_: y_val_a, keep_prob1: 1.0, keep_prob2: 1.0, keep_prob3: 1.0})
val_loss += err
val_acc += ac
n_batch += 1
print("validation loss: %f" % (val_loss / n_batch))
plt_l2.append(val_loss / n_batch)
print("validation acc: %f" % (val_acc / n_batch))
plt_a2.append(val_acc / n_batch)
    # save the model with the best accuracy so far
if val_acc > max_acc:
max_acc = val_acc
saver.save(sess, './model/10-image-set')
print("模型保存,精度: %f" % (val_acc / n_batch))
# plotting with matplotlib
plt.figure("Accuracy&Loss")
# the first subplot shows Accuracy
plt.subplot(1, 2, 1)
plt.plot(plt_x, plt_a1, label='train accuracy', marker='o',
markerfacecolor='blue', markersize=12)
plt.plot(plt_x, plt_a2, label='validation accuracy', linewidth=3, color='r', marker='o',
markerfacecolor='red', markersize=12)
plt.xlabel('Iterations')
plt.ylabel('Accuracy')
plt.title('Accuracy')
plt.legend()
# the second subplot shows Loss
plt.subplot(1, 2, 2)
plt.plot(plt_x, plt_l1, label='train loss', marker='o',
markerfacecolor='blue', markersize=12)
plt.plot(plt_x, plt_l2, label='validation loss', linewidth=3, color='r', marker='o',
markerfacecolor='red', markersize=12)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title('Loss')
plt.legend()
plt.savefig("Accuracy&Loss.png")
# plt.show()
sess.close()
| {"hexsha": "4a99c6abfa8fe22c002c07be0bf04e38473c6948", "size": 9387, "ext": "py", "lang": "Python", "max_stars_repo_path": "Python/cnn.py", "max_stars_repo_name": "2016gary/RealTimeIR", "max_stars_repo_head_hexsha": "77ae3b673307821febbcf89c3e4d2da1834f5119", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Python/cnn.py", "max_issues_repo_name": "2016gary/RealTimeIR", "max_issues_repo_head_hexsha": "77ae3b673307821febbcf89c3e4d2da1834f5119", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Python/cnn.py", "max_forks_repo_name": "2016gary/RealTimeIR", "max_forks_repo_head_hexsha": "77ae3b673307821febbcf89c3e4d2da1834f5119", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3946488294, "max_line_length": 114, "alphanum_fraction": 0.6621923937, "include": true, "reason": "import numpy", "num_tokens": 3296} |
#!/usr/bin/env python
'''
Copyright (C) 2013- Swedish Meteorological and Hydrological Institute (SMHI)
This file is part of RAVE.
RAVE is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
RAVE is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with RAVE. If not, see <http://www.gnu.org/licenses/>.
'''
## Performs hit-accumulation clutter filtering using hit-accumulation
# monthly "climatologies" or "counter files".
# Also provides the Z-diff quality indicator.
## @file
## @author Daniel Michelson, SMHI
## @date 2013-01-14
import sys, os, time, glob, types, traceback
import multiprocessing
import _raveio, _ravefield
import _polarvolume, _polarscan
import _pyhl, _odc_hac
import rave_defines
import odim_source
from Proj import rd
from numpy import zeros, uint8, uint32
import xml.etree.ElementTree as ET
HACDATA = rave_defines.RAVEROOT + '/share/hac/data'
CONFIG_FILE = rave_defines.RAVECONFIG + '/hac_options.xml'
initialized = 0
ARGS = {}
## Initializes the ARGS dictionary by reading config from XML file
def init():
global initialized
if initialized: return
C = ET.parse(CONFIG_FILE)
OPTIONS = C.getroot()
for site in list(OPTIONS):
hac = HAC()
for k in site.attrib.keys():
if k == "threshold": hac.thresh = float(site.attrib[k])
ARGS[site.tag] = hac
initialized = 1
class HAC:
def __init__(self):
self.hac = None
self.thresh = None
## Creates a HAC. Should be called only after a failed call to \ref readHac
# @param fstr file string
# @param nrays int number of rays in the scan
# @param nbins int number of bins per ray
def makeHac(self, fstr, nrays, nbins):
if not os.path.isfile(fstr):
self.hac = _ravefield.new()
self.hac.addAttribute("how/count", 0)
self.hac.setData(zeros((nrays, nbins), uint32))
else:
raise IOError, "HAC file already exists: %s" % fstr
## Reads a HAC HDF5 file and returns the dataset in it.
# @param fstr file string
def readHac(self, fstr):
if os.path.isfile(fstr):
nodelist = _pyhl.read_nodelist(fstr)
nodelist.selectNode("/accumulation_count")
nodelist.selectNode("/hit_accum")
nodelist.fetch()
self.hac = _ravefield.new()
self.hac.addAttribute("how/count",
nodelist.getNode("/accumulation_count").data())
self.hac.setData(nodelist.getNode("/hit_accum").data())
else:
raise IOError, "No such HAC file: %s" % fstr
## Writes a HAC to HDF5.
# @param fstr file string
# @param compression int ZLIB compression level
def writeHac(self, fstr, compression=0):
nodelist = _pyhl.nodelist()
node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/accumulation_count")
node.setScalarValue(-1,self.hac.getAttribute("how/count"),"long",-1)
nodelist.addNode(node)
node = _pyhl.node(_pyhl.ATTRIBUTE_ID, "/validity_time_of_last_update")
node.setScalarValue(-1,int(time.time()),"long",-1)
nodelist.addNode(node)
node = _pyhl.node(_pyhl.DATASET_ID, "/hit_accum")
node.setArrayValue(-1,[self.hac.ysize, self.hac.xsize],
self.hac.getData(),"uint",-1)
nodelist.addNode(node)
fcp = _pyhl.filecreationproperty()
fcp.userblock = 0
fcp.sizes = (4,4)
fcp.sym_k = (1,1)
fcp.istore_k = 1
fcp.meta_block_size = 0
path = os.path.split(fstr)[0]
if not os.path.isdir(path):
os.makedirs(path)
nodelist.write(fstr, compression, fcp)
## Performs the filtering
# @param scan input SCAN object
# @param param string of the quantity to filter
# @param enough int lower threshold of the number of hits to accept in order to process
def hacFilter(self, scan, quant="DBZH", enough=100):
NOD = odim_source.NODfromSource(scan)
# If HAC files are missing, then this method will passively fail.
try:
self.readHac(hacFile(scan, lastmonth=True))
if self.hac.getAttribute("how/count") < enough:
raise ValueError, "Not enough hits in climatology for %s" % NOD
hac_data = self.hac.getData()
if hac_data.shape != (scan.nrays, scan.nbins):
print hac_data.shape, (scan.nrays, scan.nbins)
raise IOError, "Scan and HAC have different geometries for %s" % NOD
## Get site-specific threshold!
try:
self.thresh = ARGS[NOD].thresh
except KeyError:
self.thresh = ARGS["default"].thresh
## Got site-specific threshold?
qind = _ravefield.new()
qind.setData(zeros(hac_data.shape, uint8))
qind.addAttribute("how/task", "eu.opera.odc.hac")
qind.addAttribute("how/task_args", self.thresh)
scan.addQualityField(qind)
_odc_hac.hacFilter(scan, self.hac, quant)
except Exception, e:
print traceback.format_exc()
## Increments the HAC with the hits in the current scan.
# @param scan input SCAN object
# @param param string of the quantity to filter
def hacIncrement(self, scan, quant="DBZH"):
NOD = odim_source.NODfromSource(scan)
hacfile = hacFile(scan)
try:
try:
self.readHac(hacfile)
except IOError:
self.makeHac(hacfile, scan.nrays, scan.nbins)
hac_data = self.hac.getData()
if hac_data.shape != (scan.nrays, scan.nbins):
print hac_data.shape, (scan.nrays, scan.nbins)
raise IOError, "Scan and HAC have different geometries for %s" % NOD
_odc_hac.hacIncrement(scan, self.hac, quant)
self.writeHac(hacfile)
except IOError:
pass
## Convenience functions
## Takes a year-month string and returns the previous month's equivalent string.
# @param YYYYMM year-month string
# @returns year-month string
def lastMonth(YYYYMM):
tt = (int(YYYYMM[:4]), int(YYYYMM[4:6])-1, 1,0,0,0,0,0,-1)
newtt = time.localtime(time.mktime(tt))
return time.strftime("%Y%m", newtt)
## Derives a file string from the input object.
# @param scan that must be an individual SCAN. This SCAN's
# /what/source must contain a valid NOD identifier.
# @param lastmonth boolean specifying whether to read the previous month's file.
# @param nod boolean specifying whether to build filenames using the NOD identifier.
# @returns string file string
def hacFile(scan, lastmonth=False, nod=True):
NOD = odim_source.NODfromSource(scan)
if not nod:
CCCC = odim_source.CCCC[NOD]
RAD = odim_source.RAD[NOD][2:]
elangle = str(int(round(scan.elangle * rd * 10)*10)).zfill(5)
rays = str(scan.nrays).zfill(4)
bins = str(scan.nbins).zfill(4)
YYYYMM = scan.date[:6]
if lastmonth == True:
YYYYMM = lastMonth(YYYYMM)
if nod:
return HACDATA + "/%s_%s_%s_%sx%s_hit-accum.hdf" % (YYYYMM, NOD,
elangle,
rays, bins)
else:
return HACDATA + "/%s_%s_%s_%s_%sx%s_hit-accum.hdf" % (YYYYMM, CCCC,
RAD, elangle,
rays, bins)
## Increments the HAC file(s) for the given object
# @param obj input SCAN or PVOL, can also be a file string
def hacIncrement(obj, quant="DBZH"):
if _polarvolume.isPolarVolume(obj):
incrementPvol(obj, quant)
elif _polarscan.isPolarScan(obj):
incrementScan(obj, quant)
elif type(obj) == types.StringType:
if os.path.isfile(obj) and os.path.getsize(obj):
obj = _raveio.open(obj).object
            hacIncrement(obj, quant)
else:
raise TypeError, "HAC incrementor received a string without a matching file, or file is empty"
else:
raise TypeError, "HAC incrementor received neither SCAN nor PVOL as input object"
## Increments the HAC file for this scan. The quantity defaults to uncorrected reflectivity (TH).
# @param scan polar scan object
def incrementScan(scan, quant="TH"):
hac = HAC()
hac.hacIncrement(scan, quant)
## Increments all the HAC files for the scans in a volume. The quantity defaults to uncorrected reflectivity (TH).
# @param pvol polar volume object
def incrementPvol(pvol, quant="TH"):
for i in range(pvol.getNumberOfScans()):
scan = pvol.getScan(i)
incrementScan(scan, quant)
## Filters the given object
# @param obj input SCAN or PVOL
def hacFilter(obj, quant="DBZH"):
if _polarvolume.isPolarVolume(obj):
filterPvol(obj, quant)
elif _polarscan.isPolarScan(obj):
filterScan(obj, quant)
else:
raise TypeError, "HAC filter received neither SCAN nor PVOL as input"
## Filters this scan. We will assume we only want to deal with DBZH.
# @param scan polar scan object
def filterScan(scan, quant="DBZH"):
hac = HAC()
hac.hacFilter(scan, quant)
## Filters this scan. We will assume we only want to deal with DBZH.
# @param scan polar scan object
def filterPvol(pvol, quant="DBZH"):
hac = HAC()
for i in range(pvol.getNumberOfScans()):
scan = pvol.getScan(i)
hac.hacFilter(scan, quant)
## Multiprocesses the incrementation
# @param fstrs list of input file strings
# @param procs int number of concurrent processes, defaults to the max allowed
# @return None; the results from the worker pool are not collected
def multi_increment(fstrs, procs=None):
pool = multiprocessing.Pool(procs)
results = []
r = pool.map_async(hacIncrement, fstrs, chunksize=1)
r.wait()
pool.terminate()
pool.join()
## Odds and ends below
## Z-diff quality indicator. Takes the difference between uncorrected and corrected reflectivities
# and derives a quality indicator out of it. The threshold is the maximum difference in dBZ
# giving the equivalent of zero quality.
# @param scan Polar scan
# @param thresh float maximum Z-diff allowed
def zdiffScan(scan, thresh=40.0):
if _polarscan.isPolarScan(scan):
if not scan.hasParameter("DBZH") or not scan.hasParameter("TH"):
return
qind = _ravefield.new()
qind.setData(zeros((scan.nrays,scan.nbins), uint8))
qind.addAttribute("how/task", "eu.opera.odc.zdiff")
qind.addAttribute("how/task_args", thresh)
qind.addAttribute("what/gain", 1/255.0)
qind.addAttribute("what/offset", 0.0)
scan.addQualityField(qind)
ret = _odc_hac.zdiff(scan, thresh)
else:
raise TypeError, "Input is expected to be a polar scan. Got something else."
def zdiffPvol(pvol, thresh=40.0):
if _polarvolume.isPolarVolume(pvol):
for i in range(pvol.getNumberOfScans()):
scan = pvol.getScan(i)
zdiffScan(scan, thresh)
else:
raise TypeError, "Input is expected to be a polar volume. Got something else."
def zdiff(obj, thresh=40.0):
if _polarscan.isPolarScan(obj):
zdiffScan(obj, thresh)
elif _polarvolume.isPolarVolume(obj):
zdiffPvol(obj, thresh)
else:
raise TypeError, "Input is expected to be a polar volume or scan"
## Initialize
init()
if __name__ == "__main__":
pass
| {"hexsha": "8ae4d3daed6812889788950149a8e2e5223eb551", "size": 11952, "ext": "py", "lang": "Python", "max_stars_repo_path": "rave_ec/Lib/odc_hac.py", "max_stars_repo_name": "DanielMichelson/drqc_article", "max_stars_repo_head_hexsha": "cd7df2f7290adedb557bbc6ba484d30039a23ce2", "max_stars_repo_licenses": ["CC-BY-4.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "rave_ec/Lib/odc_hac.py", "max_issues_repo_name": "DanielMichelson/drqc_article", "max_issues_repo_head_hexsha": "cd7df2f7290adedb557bbc6ba484d30039a23ce2", "max_issues_repo_licenses": ["CC-BY-4.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "rave_ec/Lib/odc_hac.py", "max_forks_repo_name": "DanielMichelson/drqc_article", "max_forks_repo_head_hexsha": "cd7df2f7290adedb557bbc6ba484d30039a23ce2", "max_forks_repo_licenses": ["CC-BY-4.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6676056338, "max_line_length": 106, "alphanum_fraction": 0.6373828648, "include": true, "reason": "from numpy", "num_tokens": 3017} |
"""
The ComplementPOVMEffect class and supporting functionality.
"""
#***************************************************************************************************
# Copyright 2015, 2019 National Technology & Engineering Solutions of Sandia, LLC (NTESS).
# Under the terms of Contract DE-NA0003525 with NTESS, the U.S. Government retains certain rights
# in this software.
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except
# in compliance with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0 or in the LICENSE file in the root pyGSTi directory.
#***************************************************************************************************
import numpy as _np
from pygsti.modelmembers.povms.conjugatedeffect import ConjugatedStatePOVMEffect as _ConjugatedStatePOVMEffect
from pygsti.modelmembers import modelmember as _modelmember
from pygsti.modelmembers.states.fullstate import FullState as _FullState
from pygsti.modelmembers.states.state import State as _State
class ComplementPOVMEffect(_ConjugatedStatePOVMEffect):
"""
    A POVM effect vector that ensures that all the effects of a POVM sum to the identity.
    This POVM effect vector is parameterized as `I - sum(other_effects)` where `I` is
    a (static) identity element and `other_effects` is a list of the other effect
    vectors in the same parent :class:`POVM`. This only *partially* implements
the model-member interface (some methods such as `to_vector` and `from_vector`
will thunk down to base class versions which raise `NotImplementedError`),
as instances are meant to be contained within a :class:`POVM` which takes
care of vectorization.
Parameters
----------
identity : array_like or POVMEffect
a 1D numpy array representing the static identity operation from
which the sum of the other vectors is subtracted.
    other_effects : list of POVMEffects
A list of the "other" parameterized POVM effect vectors which are
subtracted from `identity` to compute the final value of this
"complement" POVM effect vector.
"""
def __init__(self, identity, other_effects):
evotype = other_effects[0]._evotype
state_space = other_effects[0].state_space
self.identity = _FullState(
_State._to_vector(identity), evotype, state_space) # so easy to transform or depolarize by parent POVM
self.other_effects = other_effects
#Note: we assume that our parent will do the following:
# 1) set our gpindices to indicate how many parameters we have
# 2) set the gpindices of the elements of other_spamvecs so
# that they index into our local parameter vector.
_ConjugatedStatePOVMEffect.__init__(self, self.identity.copy())
self.init_gpindices() # initialize our gpindices based on sub-members
self._construct_vector() # reset's self.base
def _construct_vector(self):
#Note: assumes other effects are also ConjugatedStatePOVMEffect objects
base1d = self.state._ptr
base1d.flags.writeable = True
base1d[:] = self.identity.to_dense() - sum([vec.to_dense() for vec in self.other_effects])
base1d.flags.writeable = False
self._ptr_has_changed()
def to_memoized_dict(self, mmg_memo):
"""Create a serializable dict with references to other objects in the memo.
Parameters
----------
mmg_memo: dict
Memo dict from a ModelMemberGraph, i.e. keys are object ids and values
are ModelMemberGraphNodes (which contain the serialize_id). This is NOT
the same as other memos in ModelMember (e.g. copy, allocate_gpindices, etc.).
Returns
-------
mm_dict: dict
A dict representation of this ModelMember ready for serialization
This must have at least the following fields:
module, class, submembers, params, state_space, evotype
Additional fields may be added by derived classes.
"""
mm_dict = super().to_memoized_dict(mmg_memo)
mm_dict['identity_vector'] = self._encodemx(self.identity.to_dense())
return mm_dict
@classmethod
def _from_memoized_dict(cls, mm_dict, serial_memo):
identity = cls._decodemx(mm_dict['identity_vector'])
other_effects = [serial_memo[i] for i in mm_dict['submembers']]
return cls(identity, other_effects)
def _is_similar(self, other, rtol, atol):
""" Returns True if `other` model member (which it guaranteed to be the same type as self) has
the same local structure, i.e., not considering parameter values or submembers """
return (self.identity.shape == other.identity.shape
and _np.allclose(self.identity.to_dense(), other.identity.to_dense(), rtol=rtol, atol=atol))
def submembers(self):
"""
Get the ModelMember-derived objects contained in this one.
Returns
-------
list
"""
# Note: don't include [self.state] because its params aren't ComplementPOVMEffect params
return self.other_effects
@property
def num_params(self):
"""
Get the number of independent parameters which specify this POVM effect vector.
Returns
-------
int
the number of independent parameters.
"""
return len(self.gpindices_as_array())
def to_vector(self):
"""
Get the POVM effect vector parameters as an array of values.
Returns
-------
numpy array
The parameters as a 1D array with length num_params().
"""
raise ValueError(("ComplementPOVMEffect.to_vector() should never be called"
" - use TPPOVM.to_vector() instead"))
def from_vector(self, v, close=False, dirty_value=True):
"""
Initialize the POVM effect vector using a 1D array of parameters.
Parameters
----------
v : numpy array
The 1D vector of POVM effect vector parameters. Length
must == num_params()
close : bool, optional
Whether `v` is close to this POVM effect vector's current
set of parameters. Under some circumstances, when this
is true this call can be completed more quickly.
dirty_value : bool, optional
The value to set this object's "dirty flag" to before exiting this
call. This is passed as an argument so it can be updated *recursively*.
Leave this set to `True` unless you know what you're doing.
Returns
-------
None
"""
#Rely on prior .from_vector initialization of self.other_effects, so
# we just construct our vector based on them.
#Note: this is needed for finite-differencing in map-based calculator
self._construct_vector()
self.dirty = False # dirty_value
def deriv_wrt_params(self, wrt_filter=None):
"""
The element-wise derivative this POVM effect vector.
Construct a matrix whose columns are the derivatives of the POVM effect vector
with respect to a single param. Thus, each column is of length
dimension and there is one column per POVM effect vector parameter.
Parameters
----------
wrt_filter : list or numpy.ndarray
List of parameter indices to take derivative with respect to.
(None means to use all the this operation's parameters.)
Returns
-------
numpy array
Array of derivatives, shape == (dimension, num_params)
"""
if len(self.other_effects) == 0: return _np.zeros((self.dim, 0), 'd') # Complement vecs assumed real
Np = len(self.gpindices_as_array())
neg_deriv = _np.zeros((self.dim, Np), 'd')
for ovec in self.other_effects:
local_inds = _modelmember._decompose_gpindices(
self.gpindices, ovec.gpindices)
#Note: other_vecs are not copies but other *sibling* effect vecs
# so their gpindices index the same space as this complement vec's
# does - so we need to "_decompose_gpindices"
neg_deriv[:, local_inds] += ovec.deriv_wrt_params()
derivMx = -neg_deriv
if wrt_filter is None:
return derivMx
else:
return _np.take(derivMx, wrt_filter, axis=1)
def has_nonzero_hessian(self):
"""
Whether this POVM effect vector has a non-zero Hessian with respect to its parameters.
Returns
-------
bool
"""
return False
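# Conceptual sketch (added; plain numpy, not pyGSTi API): the complement effect simply
# stores identity - sum(other effects), which is what _construct_vector computes above.
def _complement_parameterization_example():
    identity = _np.array([1.0, 0.0, 0.0, 0.0])            # hypothetical identity element
    others = [_np.array([0.6, 0.1, 0.0, 0.0]),             # hypothetical sibling effects
              _np.array([0.3, -0.1, 0.0, 0.0])]
    return identity - sum(others)                          # -> array([0.1, 0., 0., 0.])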
| {"hexsha": "76788c766fab1ff456b47ab376d741cf20b4f45f", "size": 8839, "ext": "py", "lang": "Python", "max_stars_repo_path": "pygsti/modelmembers/povms/complementeffect.py", "max_stars_repo_name": "pyGSTi-Developers/pyGSTi", "max_stars_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "pygsti/modelmembers/povms/complementeffect.py", "max_issues_repo_name": "pyGSTi-Developers/pyGSTi", "max_issues_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pygsti/modelmembers/povms/complementeffect.py", "max_forks_repo_name": "pyGSTi-Developers/pyGSTi", "max_forks_repo_head_hexsha": "bfedc1de4d604f14b0f958615776fb80ddb59e33", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 41.3037383178, "max_line_length": 115, "alphanum_fraction": 0.6402307953, "include": true, "reason": "import numpy", "num_tokens": 1961} |
# import modules
import itertools
import os
import re
import sys
import xml.etree.ElementTree as ET
import numpy as np
import pandas as pd
from arcgis.features import GeoAccessor
import arcpy
if sys.version_info > (3, 0):
import winreg
else:
import _winreg as winreg
class BA_Data:
def __init__(self):
arcpy.env.overwriteOutput = True
@staticmethod
def _get_child_keys(key_path):
"""
Get the full path of first generation child keys under the parent key listed.
:param key_path: Path to the parent key in registry.
:return: List of the full path to child keys.
"""
# open the parent key
parent_key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, key_path)
# variables to track progress and store results
error = False
counter = 0
key_list = []
# while everything is going good
while not error:
try:
# get the child key in the iterated position
child_key = winreg.EnumKey(parent_key, counter)
# add the located key to the list
key_list.append('{}\\{}'.format(key_path, child_key))
# increment the counter
counter += 1
# when something blows up...typically because no key is found
except Exception as e:
# switch the error flag to true, stopping the iteration
error = True
# give the accumulated list back
return key_list
def _get_first_child_key(self, key_path, pattern):
"""
Based on the pattern provided, find the key with a matching string in it.
:param key_path: Full string path to the key.
:param pattern: Pattern to be located.
:return: Full path of the first key path matching the provided pattern.
"""
# get a list of paths to keys under the parent key path provided
key_list = self._get_child_keys(key_path)
# iterate the list of key paths
for key in key_list:
# if the key matches the pattern
if key.find(pattern):
# pass back the provided key path
return key
@property
def _usa_key(self):
"""
Get the key for the current ba_data installation of Business Analyst ba_data.
:return: Key for the current ba_data installation of Business Analyst ba_data.
"""
return self._get_first_child_key(r'SOFTWARE\WOW6432Node\Esri\BusinessAnalyst\Datasets', 'USA_ESRI')
@property
def _usa_dataset(self) -> str:
"""
Return the value needed for setting the environment.
:return: String value needed for setting the BA Data Environment setting.
"""
return f'LOCAL;;{os.path.basename(self._usa_key)}'
def set_to_usa_local(self):
"""
Set the environment setting to ensure using locally installed local ba_data.
:return: Boolean indicating if ba_data correctly enriched.
"""
try:
arcpy.env.baDataSource = self._usa_dataset
return True
except:
return False
def _get_business_analyst_key_value(self, locator_key):
"""
In the Business Analyst key, get the value corresponding to the provided locator key.
:param locator_key: Locator key.
:return: Key value.
"""
# open the key to the current installation of Business Analyst ba_data
key = winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, self._usa_key)
# query the value of the locator key
return winreg.QueryValueEx(key, locator_key)[0]
@property
def usa_locator(self) -> str:
"""
Path to the address locator installed with Business Analyst USA ba_data.
:return: String directory path to the address locator installed with Business Analyst USA ba_data.
"""
return self._get_business_analyst_key_value('Locator')
@property
def usa_network_dataset(self) -> str:
"""
Path to the network dataset installed with Business Analyst USA ba_data.
:return: String directory path to the network dataset installed with Business Analyst USA ba_data.
"""
return self._get_business_analyst_key_value('StreetsNetwork')
@property
def usa_data_path(self) -> str:
"""
Path where the Business Analyst USA ba_data is located.
:return: String directory path to where the Business Analyst USA ba_data is installed.
"""
return self._get_business_analyst_key_value('DataInstallDir')
def _create_demographic_layer(self, feature_class_name, layer_name=None):
"""
Esri Business Analyst standard geography layer with ID and NAME fields.
        :param feature_class_name: Name of the feature class.
:param layer_name: Output layer name.
:return: Feature Layer
"""
# get the path to the geodatabase where the Esri demographics reside
demographic_dir = os.path.join(self.usa_data_path, 'Data', 'Demographic Data')
gdb_name = [d for d in os.listdir(demographic_dir) if re.match(r'USA_ESRI_\d{4}\.gdb', d)][0]
gdb_path = os.path.join(demographic_dir, gdb_name)
fc_path = os.path.join(gdb_path, feature_class_name)
# create layer map
visible_fields = ['Shape', 'ID', 'NAME']
def _eval_visible(field_name):
if field_name in visible_fields:
return 'VISIBLE'
else:
return 'HIDDEN'
field_map_lst = [' '.join([f.name, f.name, _eval_visible(f.name), 'NONE']) for f in arcpy.ListFields(fc_path)]
field_map = ';'.join(field_map_lst)
# create and return the feature layer
if layer_name:
lyr = arcpy.management.MakeFeatureLayer(fc_path, layer_name, field_info=field_map)[0]
else:
lyr = arcpy.management.MakeFeatureLayer(fc_path, field_info=field_map)[0]
return lyr
@property
def layer_block_group(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst Census Block Group layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('BlockGroups_bg', 'block_groups')
@property
def layer_cbsa(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst CBSA layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('CBSAs_cb', 'cbsas')
@property
def layer_census_tract(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst Census Tract layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('CensusTracts_tr', 'census_tracts')
@property
def layer_congressional_district(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst Congressional District layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('CongressionalDistricts_cd', 'congressional_districts')
@property
def layer_county(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst county layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('Counties_cy', 'counties')
@property
def layer_county_subdivisions(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst county subdivision layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('CountySubdivisions_cs', 'county_subdivision')
@property
def layer_dma(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst DMA layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('DMAs_dm', 'dmas')
@property
def layer_places(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst Census Places layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('Places_pl', 'places')
@property
def layer_states(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst US States layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('States_st', 'states')
@property
def layer_postal_code(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst postal code (zip) layer with ID and NAME fields.
:return: Feature Layer
"""
return self._create_demographic_layer('ZIPCodes_zp', 'postal_codes')
@property
def layer_block_points(self) -> arcpy._mp.Layer:
"""
Esri Business Analyst block points layer - useful for calculating weighted centroids.
:return: Feature Layer
"""
        # NOTE: the block-points feature class name has not been wired in yet, so this
        # property is effectively unimplemented (the original no-argument call would raise a TypeError).
        raise NotImplementedError('block points feature class name not configured yet')
@property
def layer_blocks(self) -> arcpy._mp.Layer:
"""
US Census Blocks layer
:return: Feature Layer
"""
census_gdb = os.path.join(self.usa_data_path, 'Data', 'UserData', 'census.gdb')
        # check to see if the data has been downloaded - since it is so big (>3GB), this is problematic to do automatically
if arcpy.Exists(os.path.join(census_gdb, 'Block')):
blocks_fc = os.path.join(census_gdb, 'Block')
elif arcpy.Exists(os.path.join(census_gdb, 'block')):
blocks_fc = os.path.join(census_gdb, 'block')
else:
raise FileNotFoundError(f"The blocks feature class, which should be located at "
f"{os.path.join(census_gdb, 'blocks')} does not appear to exist. You can download "
f"this from "
f"https://www2.census.gov/geo/tiger/TGRGDB18/tlgdb_2018_a_us_block.gdb.zip. Once "
f"downloaded, extract the archive and place the Blocks feature class in "
f"{census_gdb}.")
# when initially downloaded from the US Census, the ID field is GEOID, but change this to be consistent
if 'GEOID' in [f.name for f in arcpy.ListFields(blocks_fc)]:
arcpy.management.AlterField(blocks_fc, field='GEOID', new_field_name='ID', new_field_alias='ID')
return self._create_demographic_layer(blocks_fc, 'blocks')
@property
def layer_businesses(self):
"""Business layer"""
fc_businesses = os.path.join(self.usa_data_path, r'Data\Business Data\BA_BUS_2018.gdb\us_businesses')
return arcpy.management.MakeFeatureLayer(fc_businesses)[0]
def get_business_layer_by_code(self, naics_codes:[int, str, list]=None,
sic_codes:[int, str, list]=None) -> arcpy._mp.Layer:
"""
Get business layer by NAICS and SIC code.
:param naics_code:
:param sic_code:
:return: Layer with definition query applied filtering to just the NAICS and SIC codes provided.
"""
def _get_where_clause(field_name:str, codes:[int, str, list]) -> [str, list]:
if codes is None:
return None
            elif isinstance(codes, (list, np.ndarray)):
                codes = [f"{field_name} = '{cd}'" for cd in codes]
                return ' OR '.join(codes)
            else:
                # a single code (int or str) still needs to be wrapped in a SQL clause
                return f"{field_name} = '{codes}'"
if naics_codes is None and sic_codes is None:
raise Exception('Either NAICS or SIC codes must be provided.')
if naics_codes and sic_codes is None:
sql = _get_where_clause('NAICS', naics_codes)
if naics_codes is None and sic_codes:
sql = _get_where_clause('SIC', sic_codes)
if naics_codes and sic_codes:
sql = f'{_get_where_clause("NAICS", naics_codes)} OR {_get_where_clause("SIC", sic_codes)}'
lyr_bus = self.layer_businesses
lyr_bus.definitionQuery = sql
return lyr_bus
def get_business_layer_by_name(self, business_name:str) -> arcpy._mp.Layer:
"""
Get businesses layer by name.
:param business_name: String, partial or complete, of the business name.
:return: Layer of Businesses
"""
lyr_bus = self.layer_businesses
lyr_bus.definitionQuery = f"CONAME LIKE '%{business_name.upper()}%'"
return lyr_bus
def get_business_competitor_layer(self, business_layer:[arcpy._mp.Layer, str]) -> arcpy._mp.Layer:
"""
Get a layer of competitors from a existing business layer.
:param business_layer:
:return:
"""
# get a list of NAICS codes in the original business layer to use for selecting businesses
naics_code_lst = set(r[0] for r in arcpy.da.SearchCursor(business_layer, 'NAICS'))
naics_sql = ' OR '.join(f"NAICS = '{naics}'" for naics in naics_code_lst)
# get a list of existing business ids and use for exclusion
existing_locnum_lst = [r[0] for r in arcpy.da.SearchCursor(business_layer, 'LOCNUM')]
existing_sql = ' AND '.join([f"LOCNUM <> '{locnum}'" for locnum in existing_locnum_lst])
# combine the naics selection and locnum exclusion
sql = f"{naics_sql} AND ({existing_sql})"
# create the layer and apply the query
        comp_lyr = self.layer_businesses
comp_lyr.definitionQuery = sql
return comp_lyr
def _get_data_collection_dir(self):
"""Helper function to retrieve location to find the ba_data collection files"""
dataset_config_file = os.path.join(self.usa_data_path, 'dataset_config.xml')
config_tree = ET.parse(dataset_config_file)
config_root = config_tree.getroot()
config_dir = config_root.find('./data_collections').text
return os.path.join(self.usa_data_path, config_dir)
def _get_out_field_name(self, ge_field_name):
"""Helper function to create field names to look for when trying to enrich from previously enriched ba_data."""
out_field_name = ge_field_name.replace(".", "_")
# if string starts with a set of digits, replace them with Fdigits
out_field_name = re.sub(r"(^\d+)", r"F\1", out_field_name)
# cut to first 64 characters
return out_field_name[:64]
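    # Example (added note): a variable string like "SomeCollection.TOTPOP_CY" becomes
    # "SomeCollection_TOTPOP_CY" (dots replaced), while a name starting with digits such as
    # "2010.TOTPOP" becomes "F2010_TOTPOP"; the result is always truncated to 64 characters.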
def _get_coll_df(self, coll_file):
"""
Get a dataframe of fields installed locally with Business Analyst in a single collection.
:param coll_file: String name of the collection xml file to scan.
:return: Pandas Dataframe of fields with useful combinations for analysis.
"""
# crack open the xml file and get started
coll_tree = ET.parse(os.path.join(self._get_data_collection_dir(), coll_file))
coll_root = coll_tree.getroot()
# field list to populate with property tuples
fld_lst = []
def _is_hidden(field_ele):
"""Helper to determine if hidden fields."""
if 'HideInDataBrowser' in field_ele.attrib and field_ele.attrib['HideInDataBrowser'] == 'True':
return True
else:
return False
# collect any raw scalar fields
uncalc_ele_fields = coll_root.find('./Calculators/Demographic/Fields')
if uncalc_ele_fields:
fld_lst.append([(field_ele.attrib['Name'], field_ele.attrib['Alias'])
for field_ele in uncalc_ele_fields.findall('Field')
if not _is_hidden(field_ele)])
# collect any calculated field types
calc_ele_fields = coll_root.find('./Calculators/Demographic/CalculatedFields')
if calc_ele_fields:
            # since there are two types of calculated fields, account for this
for field_type in ['PercentCalc', 'Script']:
single_fld_lst = [(field_ele.attrib['Name'], field_ele.attrib['Alias'])
for field_ele in calc_ele_fields.findall(field_type)
if not _is_hidden(field_ele)]
fld_lst.append(single_fld_lst)
# combine the results of both uncalculated and calculated fields located into single result
field_lst = list(itertools.chain.from_iterable(fld_lst))
if len(field_lst):
# create a dataframe with the field information
coll_df = pd.DataFrame(field_lst, columns=['name', 'alias'])
# using the collected information, create the really valuable fields
coll_df['collection_name'] = coll_file.split('.')[0]
coll_df['enrich_str'] = coll_df.apply(lambda row: f"{row['collection_name']}.{row['name']}", axis='columns')
coll_df['enrich_field_name'] = coll_df['enrich_str'].apply(lambda val: self._get_out_field_name(val))
return coll_df
else:
return None
def get_enrich_vars_dataframe(self, drop_duplicates:bool=True) -> pd.DataFrame:
collection_dir = self._get_data_collection_dir()
# get a complete list of collection files
coll_xml_lst = [coll_file for coll_file in os.listdir(collection_dir) if coll_file != 'EnrichmentPacksList.xml']
# get the necessary properties from the collection xml files
coll_df_lst = [self._get_coll_df(coll_file) for coll_file in coll_xml_lst]
coll_df = pd.concat([df for df in coll_df_lst if df is not None])
if drop_duplicates:
coll_df.drop_duplicates('name', inplace=True)
coll_df.sort_values('enrich_str')
coll_df.reset_index(drop=True, inplace=True)
return coll_df
@property
def enrich_vars_dataframe(self) -> pd.DataFrame:
return self.get_enrich_vars_dataframe()
@property
def enrich_vars(self) -> list:
return list(self.enrich_vars_dataframe['enrich_str'].values)
# create instance of ba_data for use
ba_data = BA_Data()
@property
def to_sdf(self) -> pd.DataFrame:
# convert the layer to a spatially enabled dataframe
df = GeoAccessor.from_featureclass(self)
# get rid of the object id field and return the dataframe
return df.drop('OBJECTID', axis=1)
# now, monkeypatch this onto the layer object
arcpy._mp.Layer.sdf = to_sdf | {"hexsha": "2b979221d7533470c5432e722727ca1cebba3e96", "size": 18558, "ext": "py", "lang": "Python", "max_stars_repo_path": "ba_data_paths/ba_data.py", "max_stars_repo_name": "knu2xs/ba_data_paths", "max_stars_repo_head_hexsha": "c161feec529882a2edfb2ed88b8a89cf07ec3243", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ba_data_paths/ba_data.py", "max_issues_repo_name": "knu2xs/ba_data_paths", "max_issues_repo_head_hexsha": "c161feec529882a2edfb2ed88b8a89cf07ec3243", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ba_data_paths/ba_data.py", "max_forks_repo_name": "knu2xs/ba_data_paths", "max_forks_repo_head_hexsha": "c161feec529882a2edfb2ed88b8a89cf07ec3243", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.5020746888, "max_line_length": 120, "alphanum_fraction": 0.6364910012, "include": true, "reason": "import numpy", "num_tokens": 4079} |
From Mtac2 Require Import Mtac2 CompoundTactics.
Import T.
Import T.notations.
Import CT.
Import CT.notations.
Example exabs (x : nat) : x = 1 -> 1 = x.
MProof.
intro H.
simple_rewrite H.
reflexivity.
Qed.
Example exabs2 (x : nat) : S x = 1 -> 1 = S x.
MProof.
intro H.
simple_rewrite H.
reflexivity.
Qed.
Require Import Strings.String.
Example exabs2' (x : nat) : S x = 1 -> 1 = S x.
MProof.
intro H.
variabilize (S x) as t.
assert (B:t = S x).
reflexivity.
Abort.
Require Import Arith.
Example exif (x : nat) : if beq_nat (S x) 1 then x = 0 : Type else True.
MProof.
variabilize (beq_nat (S x) (S 0)) as t.
assert (B:t = beq_nat (S x) 1).
reflexivity.
Abort.
Definition sillyfun (n : nat) : bool :=
if beq_nat n 3 then false
else if beq_nat n 5 then false
else false.
Theorem sillyfun_false : forall (n : nat),
(sillyfun n = false) : Type.
MProof.
intros n. unfold sillyfun.
variabilize (beq_nat n 3) as t3.
destruct t3.
simpl. reflexivity.
simpl.
variabilize (beq_nat _ _) as t5.
destruct t5 &> reflexivity.
Qed.
Definition sillyfun1 (n : nat) : bool :=
if beq_nat n 3 then true
else if beq_nat n 5 then true
else false.
Fixpoint evenb (n:nat) : bool :=
match n with
| O => true
| S O => false
| S (S n') => evenb n'
end.
Definition oddb (n:nat) : bool := negb (evenb n).
Theorem sillyfun1_odd : forall (n : nat),
(sillyfun1 n = true ->
oddb n = true) : Type .
MProof.
intros n. unfold sillyfun1.
variabilize (beq_nat n 3) as t.
assert (Heqe3 : t = (n =? 3)%nat) |1> reflexivity.
move_back Heqe3.
destruct t &> intro Heqe3.
Abort.
| {"author": "Mtac2", "repo": "Mtac2", "sha": "d16c2e682d5ab18ed77b13b4fd60a42a65c4f958", "save_path": "github-repos/coq/Mtac2-Mtac2", "path": "github-repos/coq/Mtac2-Mtac2/Mtac2-d16c2e682d5ab18ed77b13b4fd60a42a65c4f958/tests/comptactics.v"} |
"""
* GTDynamics Copyright 2020, Georgia Tech Research Corporation,
* Atlanta, Georgia 30332-0415
* All Rights Reserved
* See LICENSE for the license information
*
* @file test_four_bar.py
* @brief Unit tests for inverse dynamics of a four bar linkage.
* @author Frank Dellaert, Varun Agrawal, Mandy Xie, Alejandro Escontrela, and Yetong Zhang
"""
# pylint: disable=no-member, no-name-in-module
import unittest
import gtsam
from gtsam import Pose3, Rot3
import numpy as np
import gtdynamics as gtd
class TestFourBar(unittest.TestCase):
"""Create a 4-bar linkage manually and test it."""
def test_four_bar(self):
""" Testing for four bar linkage. """
# construct links
inertia = np.eye(3)
l1_pose = Pose3(Rot3.Rz(0), (0, 0, 0))
l2_pose = Pose3(Rot3.Rz(np.pi / 2), (2, 0, 0))
l3_pose = Pose3(Rot3.Rz(np.pi), (2, 2, 0))
l4_pose = Pose3(Rot3.Rz(np.pi * 3 / 2), (0, 2, 0))
com = Pose3(Rot3(), (1, 0, 0))
link1 = gtd.Link(1, "l1", 1, inertia, l1_pose, com)
link2 = gtd.Link(2, "l2", 1, inertia, l2_pose, com)
link3 = gtd.Link(3, "l3", 1, inertia, l3_pose, com)
link4 = gtd.Link(4, "l4", 1, inertia, l4_pose, com, True)
links = {"l1": link1, "l2": link2, "l3": link3, "l4": link4}
# construct joints
params = gtd.JointParams()
axis = np.array([0, 0, 1])
j1_pose = Pose3(Rot3.Rz(0), (2, 0, 0))
j2_pose = Pose3(Rot3.Rz(0), (2, 2, 0))
j3_pose = Pose3(Rot3.Rz(0), (0, 2, 0))
j4_pose = Pose3(Rot3.Rz(0), (0, 0, 0))
joint1 = gtd.RevoluteJoint(1, "j1", j1_pose, link1, link2, params,
axis)
joint2 = gtd.RevoluteJoint(2, "j2", j2_pose, link2, link3, params,
axis)
joint3 = gtd.RevoluteJoint(3, "j3", j3_pose, link3, link4, params,
axis)
joint4 = gtd.RevoluteJoint(4, "j4", j4_pose, link4, link1, params,
axis)
joints = {"j1": joint1, "j2": joint2, "j3": joint3, "j4": joint4}
# connect links to joints
# TODO(frank): non-functional. And not logical: why do links know about joints?
link1.addJoint(joint4)
link1.addJoint(joint1)
link2.addJoint(joint1)
link2.addJoint(joint2)
link3.addJoint(joint2)
link3.addJoint(joint3)
link4.addJoint(joint3)
link4.addJoint(joint4)
# construct robot
robot = gtd.Robot(links, joints)
# print(robot)
# construct dynamics graph
opt_setting = gtd.OptimizerSetting()
gravity = np.array([0, 0, 0])
planar_axis = np.array([0, 0, 1])
graph_builder = gtd.DynamicsGraph(opt_setting, gravity, planar_axis)
graph = graph_builder.dynamicsFactorGraph(robot, 0, None, None)
known_values = gtsam.Values()
joint_angles = np.array([0, 0, 0, 0])
joint_vels = np.array([0, 0, 0, 0])
torques = np.array([1, 0, 0, 0])
for idx, joint in enumerate(robot.joints()):
gtd.InsertJointAngleDouble(known_values, joint.id(), 0,
joint_angles[idx])
gtd.InsertJointVelDouble(known_values, joint.id(), 0, joint_vels[idx])
gtd.InsertTorqueDouble(known_values, joint.id(), 0, torques[idx])
prior_graph = graph_builder.forwardDynamicsPriors(
robot, 0, known_values)
graph.push_back(prior_graph)
# construct init values and solve
init_values = gtd.ZeroValues(robot, 0, 0)
optimizer = gtsam.LevenbergMarquardtOptimizer(graph, init_values)
result = optimizer.optimize()
a1_key = gtd.internal.JointAccelKey(1, 0).key()
a1 = result.atDouble(a1_key)
self.assertAlmostEqual(a1, 0.125, 5) # regression. Show work!
if __name__ == "__main__":
unittest.main()
| {"hexsha": "a3a69f14b1156a6ae26b955bc39ae05511c53bf2", "size": 3957, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/tests/test_four_bar.py", "max_stars_repo_name": "danbarla/GTDynamics", "max_stars_repo_head_hexsha": "0448b359aff9e0e784832666e4048ee01c8b082d", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "python/tests/test_four_bar.py", "max_issues_repo_name": "danbarla/GTDynamics", "max_issues_repo_head_hexsha": "0448b359aff9e0e784832666e4048ee01c8b082d", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "python/tests/test_four_bar.py", "max_forks_repo_name": "danbarla/GTDynamics", "max_forks_repo_head_hexsha": "0448b359aff9e0e784832666e4048ee01c8b082d", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.3027522936, "max_line_length": 91, "alphanum_fraction": 0.5850391711, "include": true, "reason": "import numpy", "num_tokens": 1212} |
[STATEMENT]
lemma wtpd_res:
"\<lbrakk> method (G, D) (md, pTs) = Some (D, rT, (pns, lvars, blk, res));
wf_prog wf_java_mdecl G; is_class G D \<rbrakk>
\<Longrightarrow> wtpd_expr (env_of_jmb G D (md, pTs)) res"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D\<rbrakk> \<Longrightarrow> wtpd_expr (env_of_jmb G D (md, pTs)) res
[PROOF STEP]
apply (simp add: wtpd_expr_def env_of_jmb_def)
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D\<rbrakk> \<Longrightarrow> \<exists>T. (G, map_of lvars(pns [\<mapsto>] pTs, This \<mapsto> Class D)) \<turnstile> res :: T
[PROOF STEP]
apply (frule_tac P="%D (md, pTs) (rT, (pns, lvars, blk, res)). \<exists>T. (G, map_of lvars(pns[\<mapsto>]pTs)(This\<mapsto>Class D)) \<turnstile> res :: T " in method_preserves)
[PROOF STATE]
proof (prove)
goal (4 subgoals):
1. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D\<rbrakk> \<Longrightarrow> is_class G ?C1
2. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D\<rbrakk> \<Longrightarrow> \<forall>S rT mb. \<forall>cn\<in>fst ` set G. wf_mdecl wf_java_mdecl G cn (S, rT, mb) \<longrightarrow> (case S of (md, pTs) \<Rightarrow> \<lambda>(rT, pns, lvars, blk, res). \<exists>T. (G, map_of lvars(pns [\<mapsto>] pTs, This \<mapsto> Class cn)) \<turnstile> res :: T) (rT, mb)
3. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D\<rbrakk> \<Longrightarrow> method (G, ?C1) ?S1 = Some (?D1, ?rT1, ?mb1)
4. \<lbrakk>method (G, D) (md, pTs) = Some (D, rT, pns, lvars, blk, res); wf_java_prog G; is_class G D; (case ?S1 of (md, pTs) \<Rightarrow> \<lambda>(rT, pns, lvars, blk, res). \<exists>T. (G, map_of lvars(pns [\<mapsto>] pTs, This \<mapsto> Class ?D1)) \<turnstile> res :: T) (?rT1, ?mb1)\<rbrakk> \<Longrightarrow> \<exists>T. (G, map_of lvars(pns [\<mapsto>] pTs, This \<mapsto> Class D)) \<turnstile> res :: T
[PROOF STEP]
apply (auto simp: wf_mdecl_def wf_java_mdecl_def)
[PROOF STATE]
proof (prove)
goal:
No subgoals!
[PROOF STEP]
done | {"llama_tokens": 1071, "file": null, "length": 4} |
#include "version.h"
#include "githash.h"
#include "fastqlib.h"
#include <boost/program_options.hpp>
namespace po = boost::program_options;
using namespace std;
string percent(int num,int den) {
char buffer[100];
sprintf(buffer,"%d / %d\t( %.2f %% )\t",num,den,100. * float(num)/float(den));
return(buffer);
}
int checkParameters(int argc,char **argv,po::variables_map & vm) {
po::options_description desc("Allowed options");
desc.add_options()
("help,h", "produce help message")
("r1,1", po::value<string>(), "read 1 in fastq format (gzip allowed)")
("r2,2", po::value<string>(), "read 2 in fastq format (gzip allowed)")
("output-prefix,O", po::value<string>(), "output prefix")
("rc", "reverse-complement reads");
po::store(po::parse_command_line(argc, argv, desc), vm);
po::notify(vm);
if (vm.count("help") || argc==1) {
cout << desc << "\n";
exit(1);
}
if (!vm.count("r1") || !vm.count("r2") || !vm.count("output-prefix")) {
cout << "Missing input!"<<endl;
exit(1);
}
return(0);
}
int main(int argc,char **argv) {
cout << "\nmergeReads "<<VERSION<<" "<<HASH<<"\nSimple utility for creating an interleaved fastq file from two separate R1/R2 files."<<endl<<endl;
po::variables_map opt;
checkParameters(argc,argv,opt);
string r1 = opt["r1"].as<string>();
string r2 = opt["r2"].as<string>();
string prefix = opt["output-prefix"].as<string>();
bool rc = opt.count("rc");
cout << "Merging:\nR1:\t" <<r1<<"\nR2:\t"<<r2<<endl;
cout << "Output: " << prefix <<".fastq.gz"<<endl;
if(rc)
cout << "Reads will be reverse-complemented."<<endl;
pairReader infile(r1,r2);
fastqWriter outfile(prefix+".fastq.gz");
readPair p;
int npass=0;
int nread=0;
while(infile.getPair(p)) {
if(!p.filtered) {
if(rc) {
p.rc();
outfile.write(p);
}
else outfile.write(p);
npass++;
}
nread++;
if(nread%10000==0)
cout << "READ PAIR "<<nread<<endl;
}
cout << percent(npass,nread) << "reads passed chastity/purity filters."<<endl;
return(0);
}
| {"hexsha": "d3c15343cc73da15a8170e4de55b1c8c26a0fc43", "size": 2104, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "mergeReads.cpp", "max_stars_repo_name": "CarolineOhrman/NxTrim", "max_stars_repo_head_hexsha": "c47acfa0a5fb086b7d470e9566490d838b7be1a7", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2021-04-23T21:23:21.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-23T21:23:21.000Z", "max_issues_repo_path": "mergeReads.cpp", "max_issues_repo_name": "CarolineOhrman/NxTrim", "max_issues_repo_head_hexsha": "c47acfa0a5fb086b7d470e9566490d838b7be1a7", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mergeReads.cpp", "max_forks_repo_name": "CarolineOhrman/NxTrim", "max_forks_repo_head_hexsha": "c47acfa0a5fb086b7d470e9566490d838b7be1a7", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3246753247, "max_line_length": 148, "alphanum_fraction": 0.6007604563, "num_tokens": 623} |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
External Tensor Functions
=========================
**Author**: `Tianqi Chen <https://tqchen.github.io>`_
While TVM supports transparent code generation, sometimes
it is also helpful to incorporate manual written code into
the pipeline. For example, we might want to use cuDNN for
some of the convolution kernels and define the rest of the stages.
TVM supports these black box function calls natively.
Specfically, tvm support all the tensor functions that are DLPack compatible.
Which means we can call any function with POD types(pointer, int, float)
or pointer to DLTensor as argument.
"""
from __future__ import absolute_import, print_function
import tvm
import numpy as np
from tvm.contrib import cblas
######################################################################
# Use Extern Tensor Function
# --------------------------
# In the example below, we use :any:`tvm.extern` to add an extern
# array function call. In the extern call, we declare the shape
# of output tensors. In the second argument we provide the list of inputs.
#
# User will need to provide a function describing how to compute the result.
# The compute function takes list of symbolic placeholder for the inputs,
# list of symbolic placeholder for the outputs and returns the executing statement.
#
# In this case we simply call a registered tvm function, which invokes a CBLAS call.
# TVM does not control internal of the extern array function and treats it as blackbox.
# We can further mix schedulable TVM calls that add a bias term to the result.
#
n = 1024
l = 128
m = 235
bias = tvm.var('bias', dtype=tvm.float32)
A = tvm.placeholder((n, l), name='A')
B = tvm.placeholder((l, m), name='B')
C = tvm.extern((n, m), [A, B],
lambda ins, outs: tvm.call_packed(
"tvm.contrib.cblas.matmul",
ins[0], ins[1], outs[0], False, False), name="C")
D = tvm.compute(C.shape, lambda i, j: C[i,j] + bias, name="D")
s = tvm.create_schedule(D.op)
######################################################################
# Verify the Result
# -----------------
# We can verify that the result matches what we expected.
#
ctx = tvm.cpu(0)
f = tvm.build(s, [A, B, D, bias], "llvm")
a = tvm.nd.array(np.random.uniform(size=(n, l)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(l, m)).astype(B.dtype), ctx)
d = tvm.nd.array(np.zeros((n, m), dtype=D.dtype), ctx)
bb = 10.0
f(a, b, d, bb)
tvm.testing.assert_allclose(
d.asnumpy(), np.dot(a.asnumpy(), b.asnumpy()) + 10, rtol=1e-5)
######################################################################
# Extern Contrib Wrappers
# -----------------------
# TVM also provide extern contrib wrappers to useful extern calls,
# the following line is equivalent to the previous example.
#
from tvm.contrib import cblas
C = cblas.matmul(A, B)
D = tvm.compute(C.shape, lambda i, j: C[i,j] + bias, name="D")
s = tvm.create_schedule(D.op)
######################################################################
# Hook Python Function as Extern
# ------------------------------
# Since we can call into any PackedFunc in TVM. We can use the extern
# function to callback into python.
#
# The following example registers a python function into tvm runtime system
# and use it to complete one stage of the computation.
# This makes TVM much more flexible. For example, we can insert front-end
# callbacks to inspect the intermediate results or mix customized code
# with TVM.
#
@tvm.register_func("tvm.contrib.my_tvm_addone")
def my_tvm_addone(x, y):
print("my_tvm_addone signatures: %s, %s" % (type(x), type(y)))
tvm.nd.array(x.asnumpy() + 1).copyto(y)
A = tvm.placeholder((n,), name='A')
B = tvm.extern(A.shape, [A], lambda ins, outs: tvm.call_packed(
"tvm.contrib.my_tvm_addone", ins[0], outs[0]), name="C")
s = tvm.create_schedule(B.op)
f = tvm.build(s, [A, B], "llvm")
a = tvm.nd.array(np.random.uniform(size=(n,)).astype(A.dtype), ctx)
b = tvm.nd.array(np.random.uniform(size=(n,)).astype(B.dtype), ctx)
f(a, b)
tvm.testing.assert_allclose(b.asnumpy(), a.asnumpy() + 1, rtol=1e-5)
######################################################################
# Summary
# -------
# - TVM calls extern tensor function via :any:`tvm.extern`
# - Use contrib wrappers for short sugars of extern tensor calls.
# - We can hook front-end function as extern tensor callbacks.
#
| {"hexsha": "071968ce2b1fa4fd3020bd516724858533f979ad", "size": 5147, "ext": "py", "lang": "Python", "max_stars_repo_path": "tutorials/language/extern_op.py", "max_stars_repo_name": "baowenlei/tvm", "max_stars_repo_head_hexsha": "6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-04-26T18:35:40.000Z", "max_stars_repo_stars_event_max_datetime": "2019-04-26T18:35:40.000Z", "max_issues_repo_path": "tutorials/language/extern_op.py", "max_issues_repo_name": "baowenlei/tvm", "max_issues_repo_head_hexsha": "6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tutorials/language/extern_op.py", "max_forks_repo_name": "baowenlei/tvm", "max_forks_repo_head_hexsha": "6b2e18ed96fad26b4a5e5f8a6dcbedf9206c9a65", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-05-24T17:22:38.000Z", "max_forks_repo_forks_event_max_datetime": "2019-06-14T23:30:24.000Z", "avg_line_length": 40.8492063492, "max_line_length": 87, "alphanum_fraction": 0.6531960365, "include": true, "reason": "import numpy", "num_tokens": 1266} |
[STATEMENT]
lemma len_Suc_mult2[simp]: "len (Suc (2 * x)) = Suc (len x)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. len (Suc (2 * x)) = Suc (len x)
[PROOF STEP]
proof (induct x rule: len.induct)
[PROOF STATE]
proof (state)
goal (3 subgoals):
1. len (Suc (2 * 0)) = Suc (len 0)
2. len (Suc (2 * Suc 0)) = Suc (len (Suc 0))
3. \<And>va. len (Suc (2 * (Suc (Suc va) div 2))) = Suc (len (Suc (Suc va) div 2)) \<Longrightarrow> len (Suc (2 * Suc (Suc va))) = Suc (len (Suc (Suc va)))
[PROOF STEP]
show "len (Suc (2 * Suc 0)) = Suc (len (Suc 0))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. len (Suc (2 * Suc 0)) = Suc (len (Suc 0))
[PROOF STEP]
by (metis div_less One_nat_def div2_Suc_Suc len.simps(3) lessI mult.right_neutral numeral_2_eq_2)
[PROOF STATE]
proof (state)
this:
len (Suc (2 * Suc 0)) = Suc (len (Suc 0))
goal (2 subgoals):
1. len (Suc (2 * 0)) = Suc (len 0)
2. \<And>va. len (Suc (2 * (Suc (Suc va) div 2))) = Suc (len (Suc (Suc va) div 2)) \<Longrightarrow> len (Suc (2 * Suc (Suc va))) = Suc (len (Suc (Suc va)))
[PROOF STEP]
qed auto | {"llama_tokens": 530, "file": "Formula_Derivatives_Presburger_Formula", "length": 4} |
[STATEMENT]
lemma conjuncts: "(\<forall> q\<in> set (conjuncts p). Ifm bs q) = Ifm bs p"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. (\<forall>q\<in>set (conjuncts p). Ifm bs q) = Ifm bs p
[PROOF STEP]
by (induct p rule: conjuncts.induct) auto | {"llama_tokens": 111, "file": null, "length": 1} |
export Problem, loss, lossgrad, lossngrad, jacobian, valueandjacobian
export project, normcs, sizen, sizep, predictor, prediction
type Problem{L<:AbLoss,
B<:AbBasis,
CS<:AbConstraintSet}
X :: Matrix
y :: Vector
loss :: L
ϕ :: Type{B}
Ω :: CS
end
Problem(X, y, l, B, ::Type{BasisL2Constraint}) = Problem(X, y, l, B, BasisL2Constraint(B, X))
Problem(X, y, l, B, ::Type{CS}) where CS<:AbConstraintSet = Problem(X, y, l, B, CS())
################### Pass through sub-sub-routines #############################
loss(prob::Problem, z::Vector) = prob.loss(prob.y, z)
lossgrad(prob::Problem, z::Vector) = derv(prob.loss, prob.y, z)
lossngrad(prob::Problem, z::Vector) = valnderv(prob.loss, prob.y, z)
jacobian(prob::Problem{L,B}, u) where {L,B} = jacobian(B, prob.X, u)
valueandjacobian(prob::Problem{L, B}, u) where {L,B} = valueandjacobian(B, prob.X, u)
project(prob::Problem, u) = project(prob.Ω, u)
normcs(prob::Problem, u) = normcs(prob.Ω, u)
############################### Sub-routines ##################################
### Basics
sizen(prob::Problem) = length(prob.y)
sizep(prob::Problem) = size(prob.X)[2]
### Advanced
predictor(X::Matrix, ω::AbBasis) = calc(ω, X)
predictor(prob::Problem, ω::AbBasis) = predictor(prob.X, ω)
predictor(prob::Problem{L, B}, u::Vector) where {L,B<:AbUniDirBasis}= calc(B, prob.X, u)
prediction(X::Matrix{<:Real}, ωs::Vector{<:AbBasis}, βs::Vector{<:Real}) = sum(βs .* calc.(ωs, (X,)))
prediction(prob::Problem, args...) = prediction(prob.X, args...)
############################### Splitter ##################################
function getsplitter(prob::Problem)
n, p = size(prob.X)
medians = [median(prob.X[:, j]) for j in 2:p]
actives = [prob.X[:, j] .≥ medians[j-1] for j in 2:p]
u0s = zeros(p, p-1)
u0s[1, :] = medians
for j in 2:p
u0s[j, j-1] = -1.
u0s[:, j-1] /= normcs(prob, u0s[:, j-1])
end
j -> u0s[:, j-1]
end
function getsplits(prob::Problem{L, B}) where {L, B<:AbBasis}
spliter = getsplitter(prob)
[B(su*spliter(j), s) for j in 2:sizep(prob) for su in (-1., 1.) for s in (-1., 1.)]
end
function cardinalsplitdirs(prob::Problem{L, B}) where {L,B}
stdlib = B[]
p = sizep(prob)
for j in 2:p
u = zeros(p)
u[j] = 1
u[1] = -median(prob.X[:, j])
u /= normcs(prob, u)
for su in (-1., 1.)
for s in (-1., 1.)
push!(stdlib, B(su*u, s))
end end end
stdlib
end
function randomsplitdir(prob::Problem{L, B}) where {L,B}
p = sizep(prob)
u = randn(p)
u[1] = 0.
Xu = prob.X*u
med = median(Xu)
u[1] = -med
u /= normcs(prob, u)
su, s = rand([-1., 1.], 2)
B(su*u, s)
end
randsplitdirs(prob, num) = [randomsplitdir(prob) for j in 1:num]
| {"hexsha": "728072f84d7ca791ae9d7966ac9f6583e7ad5159", "size": 2789, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/Tproblem.jl", "max_stars_repo_name": "rakeshvar/AnyBoost.jl", "max_stars_repo_head_hexsha": "f2477b1737e8834d622b1e3b7269bd133b25e544", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 10, "max_stars_repo_stars_event_min_datetime": "2019-06-27T20:40:01.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-22T07:35:44.000Z", "max_issues_repo_path": "src/Tproblem.jl", "max_issues_repo_name": "rakeshvar/AnyBoost.jl", "max_issues_repo_head_hexsha": "f2477b1737e8834d622b1e3b7269bd133b25e544", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-06-22T03:59:40.000Z", "max_issues_repo_issues_event_max_datetime": "2020-09-18T05:27:02.000Z", "max_forks_repo_path": "src/Tproblem.jl", "max_forks_repo_name": "rakeshvar/AnyBoost.jl", "max_forks_repo_head_hexsha": "f2477b1737e8834d622b1e3b7269bd133b25e544", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.0574712644, "max_line_length": 101, "alphanum_fraction": 0.5514521334, "num_tokens": 958} |
# STUMPY
# Copyright 2019 TD Ameritrade. Released under the terms of the 3-Clause BSD license. # noqa: E501
# STUMPY is a trademark of TD Ameritrade IP Company, Inc. All rights reserved.
from collections import deque
import numpy as np
def atsc(IL, IR, j):
"""
Compute the anchored time series chain (ATSC).
Parameters
----------
IL : ndarray
Left matrix profile indices
IR : ndarray
Right matrix profile indices
j : int
The index value for which to compute the ATSC
Returns
-------
output : ndarray
Anchored time series chain for index, `j`
Notes
-----
`DOI: 10.1109/ICDM.2017.79 <https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf>`__
See Table I
This is the implementation for the anchored time series chains (ATSC).
Unlike the original paper, we've replaced the while-loop with a more stable
for-loop.
"""
C = deque([j])
for i in range(IL.size):
if IR[j] == -1 or IL[IR[j]] != j:
break
else:
j = IR[j]
C.append(j)
return np.array(list(C), dtype=np.int64)
def allc(IL, IR):
"""
Compute the all-chain set (ALLC).
Parameters
----------
IL : ndarray
Left matrix profile indices
IR : ndarray
Right matrix profile indices
Returns
-------
S : list(ndarray)
All-chain set
C : ndarray
Anchored time series chain for the longest chain (also known as the
unanchored chain)
Notes
-----
`DOI: 10.1109/ICDM.2017.79 <https://www.cs.ucr.edu/~eamonn/chains_ICDM.pdf>`__
See Table II
Unlike the original paper, we've replaced the while-loop with a more stable
for-loop.
This is the implementation for the all-chain set (ALLC) and the unanchored
chain is simply the longest one among the all-chain set. Both the
all-chain set and unanchored chain are returned.
The all-chain set, S, is returned as a list of unique numpy arrays.
"""
L = np.ones(IL.size, dtype=np.int64)
S = set() # type: ignore
for i in range(IL.size):
if L[i] == 1:
j = i
C = deque([j])
for k in range(IL.size):
if IR[j] == -1 or IL[IR[j]] != j:
break
else:
j = IR[j]
L[j] = -1
L[i] = L[i] + 1
C.append(j)
S.update([tuple(C)])
C = atsc(IL, IR, L.argmax())
S = [np.array(s, dtype=np.int64) for s in S] # type: ignore
return S, C # type: ignore
| {"hexsha": "7c3e59c3a5c561399c0fb25d7ac161821c66aeb5", "size": 2625, "ext": "py", "lang": "Python", "max_stars_repo_path": "stumpy/chains.py", "max_stars_repo_name": "abbasnikbakht/stumpy", "max_stars_repo_head_hexsha": "3d0f5e165ade7dfdd4dad19ba5f71026345c551c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-08-10T20:22:44.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-10T20:22:44.000Z", "max_issues_repo_path": "stumpy/chains.py", "max_issues_repo_name": "abbasnikbakht/stumpy", "max_issues_repo_head_hexsha": "3d0f5e165ade7dfdd4dad19ba5f71026345c551c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "stumpy/chains.py", "max_forks_repo_name": "abbasnikbakht/stumpy", "max_forks_repo_head_hexsha": "3d0f5e165ade7dfdd4dad19ba5f71026345c551c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5327102804, "max_line_length": 99, "alphanum_fraction": 0.5588571429, "include": true, "reason": "import numpy", "num_tokens": 690} |
[STATEMENT]
lemma mono2mono:
assumes "monotone ordb leq (\<lambda>y. f y)" "monotone orda ordb (\<lambda>x. t x)"
shows "monotone orda leq (\<lambda>x. f (t x))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. monotone orda leq (\<lambda>x. f (t x))
[PROOF STEP]
using assms
[PROOF STATE]
proof (prove)
using this:
monotone ordb leq f
monotone orda ordb t
goal (1 subgoal):
1. monotone orda leq (\<lambda>x. f (t x))
[PROOF STEP]
by(rule monotone2monotone) simp_all | {"llama_tokens": 206, "file": null, "length": 2} |
import random
import numpy as np
class Dataset:
"""
A mapping from column names to immutable arrays of equal length.
"""
def __init__(self, **data):
self._data = {}
self._length = None
super().__init__()
for column, data in data.items():
self[column] = data
@property
def columns(self):
return sorted(self._data.keys())
def copy(self):
data = {x: self[x].copy() for x in self.columns}
return type(self)(**data)
def sample(self, size):
indices = random.sample(range(len(self)), size)
return self[indices]
def __len__(self):
return self._length
def __contains__(self, column):
return column in self._data
def __getattr__(self, column):
if column in self:
return self[column]
raise AttributeError
def __iter__(self):
for index in range(len(self)):
yield tuple(self[x][index] for x in self.columns)
def __eq__(self, other):
if not isinstance(other, type(self)):
return NotImplemented
if self.columns != other.columns:
return False
for column in self.columns:
if not (self[column] == other[column]).all():
return False
return True
def __getitem__(self, key):
if isinstance(key, slice):
data = {x: self[x][key] for x in self.columns}
return type(self)(**data)
if isinstance(key, (tuple, list)) and isinstance(key[0], int):
data = {x: self[x][key] for x in self.columns}
return type(self)(**data)
if isinstance(key, (tuple, list)) and isinstance(key[0], str):
data = {x: self[x] for x in key}
return type(self)(**data)
return self._data[key].copy()
def __setitem__(self, key, data):
if isinstance(key, (tuple, list)) and isinstance(key[0], str):
for column, data in zip(key, data):
self[column] = data
return
if isinstance(key, (tuple, list)) and isinstance(key[0], int):
raise NotImplementedError('column content is immutable')
data = np.array(data)
data.setflags(write=False)
if not data.size:
raise ValueError('must not be empty')
if not self._length:
self._length = len(data)
if len(data) != self._length:
raise ValueError('must have same length')
self._data[key] = data
def __delitem__(self, key):
if isinstance(key, (tuple, list)):
for column in key:
del self._data[column]
return
del self._data[key]
def __str__(self):
message = ''
for column in self.columns:
message += '{} ({}):\n\n'.format(column, self[column].dtype)
message += str(self[column]) + '\n\n'
return message
def __getstate__(self):
return {'length': self._length, 'data': self._data}
def __setstate__(self, state):
self._length = state['length']
self._data = state['data']
| {"hexsha": "b890ea8f1dfb10602452f7f081689d7b823747de", "size": 3250, "ext": "py", "lang": "Python", "max_stars_repo_path": "sets-master/sets-master/sets/core/dataset.py", "max_stars_repo_name": "FedericoMolinaChavez/tesis-research", "max_stars_repo_head_hexsha": "d77cc621d452c9ecf48d9ac80349b41aeb842412", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "sets-master/sets-master/sets/core/dataset.py", "max_issues_repo_name": "FedericoMolinaChavez/tesis-research", "max_issues_repo_head_hexsha": "d77cc621d452c9ecf48d9ac80349b41aeb842412", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-03-09T20:33:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-18T12:56:32.000Z", "max_forks_repo_path": "sets-master/sets-master/sets/core/dataset.py", "max_forks_repo_name": "FedericoMolinaChavez/tesis-research", "max_forks_repo_head_hexsha": "d77cc621d452c9ecf48d9ac80349b41aeb842412", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.5533980583, "max_line_length": 73, "alphanum_fraction": 0.5430769231, "include": true, "reason": "import numpy", "num_tokens": 719} |
import numpy as np
from PIL import Image
import settings as globalv
import pca_utils as pca
from matplotlib import pyplot as plt
import glob
from matplotlib.image import imread
import warnings # Disable warning: https://stackoverflow.com/questions/41001533/how-to-ignore-python-warnings-for-complex-numbers
warnings.filterwarnings('ignore')
# from sklearn.preprocessing import normalize
globalv.init(False)
# globalv.N = 14
globalv.N = 982
N = globalv.N
# imgsize = 50
imgsize = 28
globalv.D = imgsize**2
D = globalv.D
img = np.zeros((imgsize,imgsize))
data = np.zeros((D, N))
plt.figure(1)
i = 0
for filename in glob.glob("four_dataset/*.jpg"):
img = np.asarray(imread(filename))
data[:, i] = np.ravel(img)
i = i + 1
# for i in range(N):
# img = np.asarray(Image.open('faces/' + str(i+1) +'.jpg').convert('L'))
# data[:,i] = np.ravel(img)
print(" *** End reading image ..")
# The loaded images form a data vector of dimensions DxN, D=76^2, N = 6
# that is, 6 patterns of 76^2-dimensional data
# Because there are less patterns than dimensions, the maximum number of uncorrelated
# components we can keep is 6-1 = 5, because there are no more directions of data variability
# Do PCA:
centered_data, mean_vector = pca.centerData(data)
corr = pca.correlationMatrix(centered_data)
eigvals, eigvecs = pca.eigenDecomposition(corr)
# r = pca.readUserNumComponents('\nHow many components (0 <= int <= %d)?\n' %D)
n_comp = 4
# proj_matrix = pca.computeProjectionMatrix(eigvals, eigvecs, r)
proj_matrix = pca.computeProjectionMatrix(eigvals, eigvecs, n_comp)
princ_comps = pca.computePrincipalComponents(centered_data, proj_matrix)
reconstructed_data = pca.reconstructData(princ_comps, proj_matrix, mean_vector)
error = pca.computeRecError(data, reconstructed_data)
# Printing the graph
ori_data = data[:, 0].reshape(imgsize, imgsize)
rec_data = reconstructed_data[:, 0].reshape(imgsize, imgsize)
f, (ax1, ax2) = plt.subplots(1, 2)
f.suptitle('PCA comparison with #Dim = %s' % n_comp)
ax1.imshow(ori_data, cmap='gray', interpolation='none')
ax1.set_title('Original Image')
ax2.imshow(rec_data, cmap='gray', interpolation='none')
ax2.set_title('Compressed Image')
plt.show()
# n_max = 1
# for i in range(n_max):
# rec_data = reconstructed_data[:, i].reshape(imgsize, imgsize)
# plt.subplot(2, n_max, n_max+i+1)
# plt.axis('off')
# plt.imshow(rec_data, cmap='gray', interpolation='none')
#
# plt.show() | {"hexsha": "d435ebd4722f1cc05e6f21a9620ab1ae10d22ee0", "size": 2436, "ext": "py", "lang": "Python", "max_stars_repo_path": "hw3/references/simple-PCA/image_pca.py", "max_stars_repo_name": "ardihikaru/mlsp", "max_stars_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hw3/references/simple-PCA/image_pca.py", "max_issues_repo_name": "ardihikaru/mlsp", "max_issues_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hw3/references/simple-PCA/image_pca.py", "max_forks_repo_name": "ardihikaru/mlsp", "max_forks_repo_head_hexsha": "db38972bcceac7b95808132457c4de9170546c9d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-07T14:25:54.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-07T14:25:54.000Z", "avg_line_length": 31.6363636364, "max_line_length": 129, "alphanum_fraction": 0.7282430213, "include": true, "reason": "import numpy", "num_tokens": 681} |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy
import paddle.fluid.core as core
from paddle.fluid.executor import Executor
from paddle.fluid.layers import mul, data, zeros, array_write, increment
class TestExecutor(unittest.TestCase):
def test_mul(self):
i = zeros(shape=[1], dtype='int64')
a = data(name='a', shape=[784], dtype='float32')
array = array_write(x=a, i=i)
i = increment(i)
b = data(
name='b',
shape=[784, 100],
dtype='float32',
append_batch_size=False)
array_write(x=b, i=i, array=array)
i = increment(i)
out = mul(x=a, y=b)
array_write(x=out, i=i, array=array)
a_np = numpy.random.random((100, 784)).astype('float32')
b_np = numpy.random.random((784, 100)).astype('float32')
exe = Executor()
res, res_array = exe.run(feed={'a': a_np,
'b': b_np},
fetch_list=[out, array])
self.assertEqual((100, 100), res.shape)
self.assertTrue(numpy.allclose(res, numpy.dot(a_np, b_np)))
self.assertTrue(numpy.allclose(res_array[0], a_np))
self.assertTrue(numpy.allclose(res_array[1], b_np))
self.assertTrue(numpy.allclose(res_array[2], res))
if __name__ == '__main__':
unittest.main()
| {"hexsha": "ebe820cb90ae235c4a95819f823b74afbc98e3b3", "size": 2000, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/paddle/fluid/tests/unittests/test_executor_and_mul.py", "max_stars_repo_name": "zmxdream/Paddle", "max_stars_repo_head_hexsha": "04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17085, "max_stars_repo_stars_event_min_datetime": "2016-11-18T06:40:52.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T22:52:32.000Z", "max_issues_repo_path": "python/paddle/fluid/tests/unittests/test_executor_and_mul.py", "max_issues_repo_name": "zmxdream/Paddle", "max_issues_repo_head_hexsha": "04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 29769, "max_issues_repo_issues_event_min_datetime": "2016-11-18T06:35:22.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T16:46:15.000Z", "max_forks_repo_path": "python/paddle/fluid/tests/unittests/test_executor_and_mul.py", "max_forks_repo_name": "zmxdream/Paddle", "max_forks_repo_head_hexsha": "04f042a5d507ad98f7f2cfc3cbc44b06d7a7f45c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 4641, "max_forks_repo_forks_event_min_datetime": "2016-11-18T07:43:33.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T15:15:02.000Z", "avg_line_length": 33.3333333333, "max_line_length": 74, "alphanum_fraction": 0.6375, "include": true, "reason": "import numpy", "num_tokens": 467} |
[STATEMENT]
lemma listrelp_imp_listsp1:
assumes H: "listrelp (\<lambda>x y. P x) xs ys"
shows "listsp P xs"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
1. listsp P xs
[PROOF STEP]
using H
[PROOF STATE]
proof (prove)
using this:
listrelp (\<lambda>x y. P x) xs ys
goal (1 subgoal):
1. listsp P xs
[PROOF STEP]
by induct auto | {"llama_tokens": 147, "file": null, "length": 2} |
\ Tools -- Formatted unsigned single hex number output, not using BASE
\ an-17jan2022
decimal
: .1HX ( x -- ) \ Print last digit of x in hex
15 and \ lowest nibble
9 over < 7 and + \ for A..F
[char] 0 + emit ;
: .NHX ( x n -- ) \ Print last n digits of x in hex
1 max 16 min >r \ x r: n
r@ 1 ?do dup 4 rshift loop \ collect on data stack
r> 0 do .1hx loop space ;
\ ----- end of code -----
(*
Examples
decimal
19150 2 .nhx \ CE
19150 3 .nhx \ ACE
19150 4 .nhx \ 4ACE
19150 8 .nhx \ 00004ACE
\ .NHX version without DO-LOOP
: .NHX ( x n -- ) \ Print last n digits of x in hex
swap >r 1 max 16 min dup \ n n r: x
begin 1- dup while r@ 4 rshift >r \ collect on return stack
repeat drop
begin r> .1hx 1- dup 0= until drop space ;
*)
\ <><>
| {"hexsha": "4d5b5b53c2995465471f1a4c2c895710faea6737", "size": 898, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "Programming-Pearls/print-hex/print-hex.f", "max_stars_repo_name": "embeddingforth/embeddinForth", "max_stars_repo_head_hexsha": "b1921074c6f63be9570c7345e8196a6a4ff44b93", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2021-11-13T15:15:56.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-29T10:56:05.000Z", "max_issues_repo_path": "Programming-Pearls/print-hex/print-hex.f", "max_issues_repo_name": "embeddingforth/embeddinForth", "max_issues_repo_head_hexsha": "b1921074c6f63be9570c7345e8196a6a4ff44b93", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Programming-Pearls/print-hex/print-hex.f", "max_forks_repo_name": "embeddingforth/embeddinForth", "max_forks_repo_head_hexsha": "b1921074c6f63be9570c7345e8196a6a4ff44b93", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.2121212121, "max_line_length": 71, "alphanum_fraction": 0.5167037862, "num_tokens": 310} |
\section{FLL}
\pulpissimo containts 3 FLLs. One FLL is meant for generating the clock for the
peripheral domain, one for the core domain (core, memories, event unit etc) and
one is meant for the cluster. The latter is not used.
All the FLLs can be bypassed by writing to the JTAG register before the reset
signal is asserted. See Section ~\ref{sec:soc_ctrl} for more details about the
bypass register.
\subsection{SoC FLL registers}
\begin{table}[htbp]
\small
\begin{tabularx}{\textwidth}{|l|l|l|l|l|l|X|}
\hline
\textbf{Name} & \textbf{Address} & \textbf{Size} & \textbf{Type} & \textbf{Access} & \textbf{Default} & \textbf{Description} \\
\hline
STATUS & \texttt{0x1A100000} & 32 & Status & R & \texttt{0x00000000} & FLL status register \\
\hline
CFG1 & \texttt{0x1A100004} & 32 & Config & R/W & \texttt{0x00000000} & FLL configuration 1 register \\
\hline
CFG2 & \texttt{0x1A100008} & 32 & Config & R/W & \texttt{0x00000000} & FLL configuration 2 register \\
\hline
INTEG & \texttt{0x1A10000C} & 32 & Config & R/W & \texttt{0x00000000} & FLL integrator configuration register. \\
\hline
\end{tabularx}
\caption{SoC FLL register table \label{tab:table_label}}
\end{table}
\regdoc{0x1A10\_0000}{0x0000\_0000}{STATUS}{
\begin{bytefield}[endianness=big,bitwidth=2em]{16}
\bitheader[lsb=16]{16-31} \\
\bitbox{16}{\color{lightgray}\rule{\width}{\height}} \\[3ex]
\bitheader{0-15} \\
\bitbox{16}{MF}
\end{bytefield}
}{
\regitem{Bit 15-0}{MF}{R}{Current DCO multiplication factor value bitfield}
}
\regdoc{0x1A10\_0004}{0x0000\_0000}{CFG1}{
\begin{bytefield}[endianness=big,bitwidth=2em]{16}
\bitheader[lsb=16]{16-31} \\
\bitbox{1}{\tiny CKM} \bitbox{1}{\tiny CKG} \bitbox{4}{CKDIV} \bitbox{10}{ICS} \\[3ex]
\bitheader{0-15} \\
\bitbox{16}{MFN}
\end{bytefield}
}{
\regitem{Bit 31}{CKM}{R/W}{FLL operation mode configuration bitfield
\begin{itemize}
\item 0b0: standalone
\item 0b1: normal
\end{itemize}}
\regitem{Bit 30}{CKG}{R/W}{FLL output clock divider configuration
\begin{itemize}
\item 0b0: not gated
\item 0b1: gated
\end{itemize}}
\regitem{Bit 29-26}{CKDIV}{R/W}{FLL output clock divider configuration}
\regitem{Bit 25-16}{ICS}{R/W}{DCO input code in standalone}
\regitem{Bit 15-0}{MFN}{R/W}{Target clock multiplication factor in normal mode}
}
\regdoc{0x1A10\_0008}{0x0000\_0000}{CFG2}{
\begin{bytefield}[endianness=big,bitwidth=2em]{16}
\bitheader[lsb=16]{16-31} \\
\bitbox{1}{\tiny DITH} \bitbox{1}{\tiny OL} \bitbox{1}{\tiny CKSEL} \bitbox{1}{\color{lightgray}\rule{\width}{\height}} \bitbox{12}{LT} \\[3ex]
\bitheader{0-15} \\
\bitbox{6}{SCKL} \bitbox{6}{UCKL} \bitbox{4}{LG}
\end{bytefield}
}{
\regitem{Bit 31}{DITH}{R/W}{Dithering activation}
\regitem{Bit 30}{CKM}{R/W}{Open loop when locked
\begin{itemize}
\item 0b0: disabled
\item 0b1: enabled
\end{itemize}}
\regitem{Bit 29}{CKSEL}{R/W}{Configuration clock selection in standalone mode
\begin{itemize}
\item 0b0: DCO clock
\item 0b1: Reference clock
\end{itemize}}
\regitem{Bit 27-16}{LT}{R/W}{Lock tolerance configuration. It is the margin
around the multiplication factor within which the output clock is considered
stable.}
\regitem{Bit 15-10}{SCKL}{R/W}{Number of stable REFCLK cycles until LOCK
assert in normal mode. Uppper 6 bits of LOCK assert counter target in
standalone mode.}
\regitem{Bit 9-4}{UCKL}{R/W}{Number of unstable REFCLK cycles until LOCK
de-assert in normal mode. Lower 6 bits of LOCK assert counter target in
standalone mode.}
\regitem{Bit 3-0}{LG}{R/W}{FLL loop gain setting}
}
\regdoc{0x1A10\_000C}{0x0000\_0000}{INTEG}{
\begin{bytefield}[endianness=big,bitwidth=2em]{16}
\bitheader[lsb=16]{16-31} \\
\bitbox{6}{\color{lightgray}\rule{\width}{\height}} \bitbox{10}{INTEG} \\[3ex]
\bitheader{0-15} \\
\bitbox{10}{FRAC} \bitbox{6}{\color{lightgray}\rule{\width}{\height}}
\end{bytefield}
}{
\regitem{Bit 25-16}{INTEG}{R/W}{Integer part of integrator state bitfield. It corresponds to DCO unit bits.}
\regitem{Bit 15-6}{FRAC}{R/W}{Fractional part of integrator state bitfield. It corresponds to dither unit input.}
}
| {"hexsha": "9d89bd16cb0e47312906fdd60c2e049e55ed6cec", "size": 4240, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "doc/datasheet/content/peripherals_fll.tex", "max_stars_repo_name": "davideschiavone/pulpissimo", "max_stars_repo_head_hexsha": "c0e29f22ed947ce5330d950d8432085937136e72", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 228, "max_stars_repo_stars_event_min_datetime": "2018-02-09T15:45:56.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T10:27:31.000Z", "max_issues_repo_path": "doc/datasheet/content/peripherals_fll.tex", "max_issues_repo_name": "davideschiavone/pulpissimo", "max_issues_repo_head_hexsha": "c0e29f22ed947ce5330d950d8432085937136e72", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 248, "max_issues_repo_issues_event_min_datetime": "2018-02-14T13:45:47.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-27T10:12:07.000Z", "max_forks_repo_path": "doc/datasheet/content/peripherals_fll.tex", "max_forks_repo_name": "davideschiavone/pulpissimo", "max_forks_repo_head_hexsha": "c0e29f22ed947ce5330d950d8432085937136e72", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 137, "max_forks_repo_forks_event_min_datetime": "2018-02-09T19:04:23.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T21:37:22.000Z", "avg_line_length": 37.8571428571, "max_line_length": 145, "alphanum_fraction": 0.6851415094, "num_tokens": 1578} |
"""
"""
import numpy as np
from jax import numpy as jax_np
from jax import jit as jax_jit
from jax import value_and_grad
from ..utils import jax_inverse_sigmoid, jax_sigmoid, jax_adam_wrapper
def test_inverse_sigmoid_actually_inverts():
""""""
x0, k, ylo, yhi = 0, 5, 1, 0
xarr = np.linspace(-1, 1, 100)
yarr = np.array(jax_sigmoid(xarr, x0, k, ylo, yhi))
xarr2 = np.array(jax_inverse_sigmoid(yarr, x0, k, ylo, yhi))
assert np.allclose(xarr, xarr2, rtol=1e-3)
def test_jax_adam_wrapper_actually_minimizes_the_loss():
@jax_jit
def mse_loss(params, data):
x = data[0]
target = 3 * (x - 1)
a, b = params
pred = a * (x - b)
diff = pred - target
loss = jax_np.sum(diff * diff) / diff.size
return loss
@jax_jit
def mse_loss_and_grad(params, data):
return value_and_grad(mse_loss, argnums=0)(params, data)
params_init = np.array((2.75, 0.75))
x = np.linspace(-1, 1, 50)
data = (x,)
loss_init = mse_loss(params_init, data)
n_step = 100
params_bestfit, loss_bestfit, loss_arr, params_arr, flag = jax_adam_wrapper(
mse_loss_and_grad, params_init, data, n_step, step_size=0.01
)
assert loss_arr[-1] < loss_init
assert np.allclose(loss_bestfit, loss_arr[-1], atol=0.001)
params_correct = [3, 1]
assert np.allclose(params_bestfit, params_correct, atol=0.01)
| {"hexsha": "a1ce5b82bd690b7cabdcd42e272af39fec6cdb32", "size": 1408, "ext": "py", "lang": "Python", "max_stars_repo_path": "diffmah/tests/test_utils.py", "max_stars_repo_name": "ArgonneCPAC/diffmah", "max_stars_repo_head_hexsha": "867d11def6284b07e58753f0d4590adc21495e3d", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2021-05-14T10:05:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-13T08:56:16.000Z", "max_issues_repo_path": "diffmah/tests/test_utils.py", "max_issues_repo_name": "ArgonneCPAC/diffmah", "max_issues_repo_head_hexsha": "867d11def6284b07e58753f0d4590adc21495e3d", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-04-21T20:41:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-05-05T15:05:03.000Z", "max_forks_repo_path": "diffmah/tests/test_utils.py", "max_forks_repo_name": "ArgonneCPAC/diffmah", "max_forks_repo_head_hexsha": "867d11def6284b07e58753f0d4590adc21495e3d", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-05-05T17:29:31.000Z", "max_forks_repo_forks_event_max_datetime": "2021-05-05T17:29:31.000Z", "avg_line_length": 29.9574468085, "max_line_length": 80, "alphanum_fraction": 0.6519886364, "include": true, "reason": "import numpy,import jax,from jax", "num_tokens": 464} |
# coding=utf-8
import os
import re
import json
import numpy as np
import pandas as pd
import html
import urllib
from itertools import combinations
import jieba
import jieba.posseg as pseg
from collections import defaultdict
from .word_discoverer import WordDiscoverer
from .sent_dict import SentDict
from .resources import get_qh_sent_dict
import logging
import warnings
from pypinyin import lazy_pinyin, pinyin
class HarvestText:
def __init__(self, standard_name=False):
        self.standard_name = standard_name  # whether to replace surface forms in the text with the linked entity's standard name
self.entity_types = set()
self.trie_root = {}
self.entity_mention_dict = defaultdict(set)
self.entity_type_dict = {}
self.type_entity_mention_dict = defaultdict(dict)
self.pinyin_mention_dict = defaultdict(set)
self.mentions = set()
self.prepared = False
self.hanlp_prepared = False
self.sent_dict = None
        # self.check_overlap = True  # whether to check overlapping entities (e.g. "市长"/"长江" inside "市长江大桥"); may be slow when enabled
        # only the "freq" strategy can resolve overlaps, so currently overlap checking is implied by choosing "freq" and skipped otherwise
        self.linking_strategy = "None"  # strategy for linking a surface form to an entity; by default the first candidate in lexicographic order is picked
        self.entity_count = defaultdict(int)  # used by the 'freq' strategy
        self.latest_mention = dict()  # used by the 'latest' strategy
pwd = os.path.abspath(os.path.dirname(__file__))
with open(pwd + "/resources/pinyin_adjlist.json", "r", encoding="utf-8") as f:
self.pinyin_adjlist = json.load(f)
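    # Construction sketch (illustrative): ht = HarvestText(standard_name=True) asks downstream
    # text-processing methods to substitute the linked entity's standard name for the raw
    # surface form, per the standard_name flag documented above.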
    #
    # Entity-aware word segmentation module
    #
def build_trie(self, new_word, entity, entity_type):
type0 = "#%s#" % entity_type
if not type0 in self.entity_types:
punct_regex = r"[、!?。"#$%&'()*+,-/:;<=>@[\]^_`{|}~⦅⦆「」、、〃》「」『』【】〔〕〖〗〘〙〚〛〜〝〞〟〰〾〿–—‘’‛“”„‟…‧﹏!\"\#$%&\'\(\)\*\+,-\./:;<=>?@\[\\\]\^_`{\|}~]"
matched = re.search(punct_regex, entity_type, re.MULTILINE | re.UNICODE)
if matched:
punct0 = matched.group()
raise Exception("Your type input '{}' includes punctuation '{}', please remove them first".format(entity_type,punct0))
self.entity_types.add(type0)
self.prepared = False
self.hanlp_prepared = False
self.mentions.add(new_word)
self.pinyin_mention_dict[tuple(lazy_pinyin(new_word))].add(new_word)
trie_node = self.trie_root
for ch in new_word:
if not ch in trie_node:
trie_node[ch] = {}
trie_node = trie_node[ch]
if not 'leaf' in trie_node:
trie_node['leaf'] = {(entity, type0)}
else:
for (entity_orig, type_orig) in trie_node['leaf'].copy():
                if entity_orig == entity:  # the same entity is not allowed to have two different types
trie_node['leaf'].remove((entity_orig, type_orig))
trie_node['leaf'].add((entity, type0))
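    # A minimal sketch (illustrative, not part of the library) of the trie that build_trie
    # produces after registering the mention "HIT" for entity "哈工大" of type "学校":
    #   self.trie_root == {"H": {"I": {"T": {"leaf": {("哈工大", "#学校#")}}}}}
    # Each character becomes a nested dict key; the special 'leaf' key stores (entity, "#type#") pairs.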
def remove_mention(self, mention):
trie_node = self.trie_root
for ch in mention:
if ch in trie_node:
trie_node = trie_node[ch]
else:
return
if not 'leaf' in trie_node:
return
else:
del trie_node['leaf']
def remove_entity(self, entity):
mentions = self.entity_mention_dict[entity]
for mention0 in mentions:
trie_node = self.trie_root
for ch in mention0:
if ch in trie_node:
trie_node = trie_node[ch]
else:
continue
if not 'leaf' in trie_node:
continue
else:
for (entity0, type0) in trie_node['leaf'].copy():
if entity0 == entity:
trie_node["leaf"].remove((entity0, type0))
break
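    # remove_mention drops a single surface form from the trie, while remove_entity walks every
    # surface form registered for the entity and removes that entity from each 'leaf' set,
    # e.g. ht.remove_entity("刘备") (illustrative name).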
def add_entities(self, entity_mention_dict=None, entity_type_dict=None):
if entity_mention_dict is None and entity_type_dict is None:
return
        if entity_mention_dict is None:  # use the entity name itself as its default mention
entity_mention_dict = dict(
(entity0, {entity0}) for entity0 in entity_type_dict)
else:
entity_mention_dict = dict(
(entity0, set(mentions0)) for (entity0, mentions0) in entity_mention_dict.items())
if len(self.entity_mention_dict) == 0:
self.entity_mention_dict = entity_mention_dict
else:
            for entity, mentions in entity_mention_dict.items():
if entity in self.entity_mention_dict:
self.entity_mention_dict[entity] |= entity_mention_dict[entity]
else:
self.entity_mention_dict[entity] = entity_mention_dict[entity]
        if entity_type_dict is None:
            entity_type_dict = {entity: "添加词" for entity in entity_mention_dict}  # "添加词" is the default type label
        if len(self.entity_type_dict) == 0:
self.entity_type_dict = entity_type_dict
else:
for entity, type0 in entity_type_dict.items():
                if entity in self.entity_type_dict and type0 != self.entity_type_dict[entity]:
                    # the same entity is not allowed to have two different types
                    warnings.warn("You've added an entity twice with different types, the later type will be used.")
self.entity_type_dict[entity] = type0
type_entity_mention_dict = defaultdict(dict)
for entity0, type0 in self.entity_type_dict.items():
if entity0 in self.entity_mention_dict:
type_entity_mention_dict[type0][entity0] = self.entity_mention_dict[entity0]
self.type_entity_mention_dict = type_entity_mention_dict
self._add_entities(type_entity_mention_dict)
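    # Usage sketch (illustrative values):
    #   ht = HarvestText()
    #   ht.add_entities(entity_mention_dict={"刘备": {"刘备", "玄德"}},
    #                   entity_type_dict={"刘备": "人名"})
    # Passing only entity_type_dict makes each entity its own mention; passing only
    # entity_mention_dict falls back to the default type label "添加词".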
def add_typed_words(self, type_word_dict):
entity_type_dict = dict()
for type0 in type_word_dict:
for word in type_word_dict[type0]:
entity_type_dict[word] = type0
entity_mention_dict = dict(
(entity0, set([entity0])) for entity0 in entity_type_dict.keys())
self.entity_type_dict = entity_type_dict
self.entity_mention_dict = entity_mention_dict
type_entity_mention_dict = defaultdict(dict)
for entity0, type0 in self.entity_type_dict.items():
if entity0 in entity_mention_dict:
type_entity_mention_dict[type0][entity0] = entity_mention_dict[entity0]
self.type_entity_mention_dict = type_entity_mention_dict
self._add_entities(type_entity_mention_dict)
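    # Sketch: ht.add_typed_words({"水果": ["苹果", "香蕉"]}) registers every word under its
    # dict key as an entity of that type, with the word itself as its only mention.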
def _add_entities(self, type_entity_mention_dict):
for type0 in type_entity_mention_dict:
entity_mention_dict0 = type_entity_mention_dict[type0]
for entity0 in entity_mention_dict0:
mentions = entity_mention_dict0[entity0]
for mention0 in mentions:
self.build_trie(mention0, entity0, type0)
self.prepare()
    def prepare(self):
        # register each "#type#" placeholder as a jieba user word, guessing a POS tag from the type name
        self.prepared = True
for type0 in self.entity_types:
tag0 = "n"
if "人名" in type0:
tag0 = "nr"
elif "地名" in type0:
tag0 = "ns"
elif "机构" in type0:
tag0 = "nt"
elif "其他专名" in type0:
tag0 = "nz"
jieba.add_word(type0, freq = 10000, tag=tag0)
def hanlp_prepare(self):
from pyhanlp import HanLP, JClass
CustomDictionary = JClass("com.hankcs.hanlp.dictionary.CustomDictionary")
StandardTokenizer = JClass("com.hankcs.hanlp.tokenizer.NLPTokenizer")
self.hanlp_prepared = True
for type0 in self.entity_types:
tag0 = "n"
if "人名" in type0:
tag0 = "nr"
elif "地名" in type0:
tag0 = "ns"
elif "机构" in type0:
tag0 = "nt"
elif "其他专名" in type0:
tag0 = "nz"
            CustomDictionary.insert(type0, "%s 1000" % (tag0))  # add to HanLP's custom dictionary at runtime
StandardTokenizer.ANALYZER.enableCustomDictionaryForcing(True)
def deprepare(self):
self.prepared = False
self.hanlp_prepared = False
for type0 in self.entity_types:
del jieba.dt.FREQ[type0]
tag0 = type0[1:-1]
if tag0 in jieba.dt.user_word_tag_tab:
del jieba.dt.user_word_tag_tab[tag0]
jieba.dt.total -= 10000
def check_prepared(self):
if not self.prepared:
self.prepare()
    def dig_trie(self, sent, l):  # returns the right boundary r of the match and the set of candidate (entity, type) pairs
trie_node = self.trie_root
        # Keep records: after a match is found we may keep scanning forward; if the longer path then fails,
        # we backtrack to the last recorded match.
        # e.g. with mentions ("料酒", "料酒 (焯水用)"), matching in "料酒 花椒" already passes "料酒" but keeps
        # going because of the space, and without the records the result would be missed
records = []
for i in range(l, len(sent)):
if sent[i] in trie_node:
trie_node = trie_node[sent[i]]
else:
break
if "leaf" in trie_node:
records.append((i + 1, trie_node["leaf"]))
if len(records) > 0:
return records[-1]
else:
            return -1, set()  # -1 means not found
def search_word_trie(self, word, tolerance=1):
"""
:param word:
:param tolerance:
:return:
"""
results = set()
def _visit(_trie, _word, _tolerance, _mention):
if len(_word) > 0:
ch = _word[0]
if ch in _trie:
_visit(_trie[ch], _word[1:], _tolerance, _mention+ch)
if _tolerance:
for ch in _trie:
if ch not in [_word[0], 'leaf']:
_visit(_trie[ch], _word[1:], _tolerance - 1, _mention+ch)
else:
if 'leaf' in _trie:
results.add(_mention)
_visit(self.trie_root, word, tolerance,"")
return list(results)
def set_linking_strategy(self, strategy, lastest_mention=None, entity_freq=None, type_freq=None):
"""
为实体链接设定一些简单策略,目前可选的有:
'None','freq','latest','latest&freq'
'None': 默认选择候选实体字典序第一个
'freq': 对于单个字面值,选择其候选实体中之前出现最频繁的一个。
对于多个重叠字面值,选择其中候选实体出现最频繁的一个进行连接【每个字面值已经确定唯一映射】。
'latest': 对于单个字面值,如果在最近有可以确定的映射,就使用最近的映射。
'latest'- 对于职称等作为代称的情况可能会比较有用。
比如"经理"可能代指很多人,但是第一次提到的时候应该会包括姓氏。
我们就可以记忆这次信息,在后面用来消歧。
'freq' - 单字面值例:'市长'+{'A市长':5,'B市长':3} -> 'A市长'
重叠字面值例,'xx市长江yy'+{'xx市长':5,'长江yy':3}+{'市长':'xx市长'}+{'长江':'长江yy'} -> 'xx市长'
:param strategy: 可选 'None','freq','latest','latest&freq' 中的一个
:param lastest_mention: dict,用于'latest',预设
:param entity_freq: dict,用于'freq',预设某实体的优先级(词频)
:param type_freq: dict,用于'freq',预设类别所有实体的优先级(词频)
:return None
"""
self.linking_strategy = strategy
if "latest" in strategy:
if lastest_mention:
for surface0, entity0 in lastest_mention.items():
self.latest_mention[surface0] = entity0
if "freq" in strategy:
if entity_freq:
for entity0, freq0 in entity_freq.items():
self.entity_count[entity0] += freq0
if type_freq:
for type0, freq0 in type_freq.items():
for entity0 in self.type_entity_mention_dict[type0].keys():
self.entity_count[entity0] += freq0
def _link_record(self, surface0, entity0):
if "latest" in self.linking_strategy:
for surface0 in self.entity_mention_dict[entity0]:
self.latest_mention[surface0] = entity0
if "freq" in self.linking_strategy:
self.entity_count[entity0] += 1
def choose_from(self, surface0, entity_types):
if self.linking_strategy == "None":
linked_entity_type = list(entity_types)[0]
else:
linked_entity_type = None
if "latest" in self.linking_strategy:
if surface0 in self.latest_mention:
entity0 = self.latest_mention[surface0]
for entity_type0 in entity_types:
if entity0 in entity_type0:
linked_entity_type = entity_type0
break
if linked_entity_type is None:
if "freq" in self.linking_strategy:
candidate, cnt_cand = None, 0
for i, entity_type0 in enumerate(entity_types):
entity0, cnt0 = entity_type0[0], 0
if entity0 in self.entity_count:
cnt0 = self.entity_count[entity0]
if i == 0 or cnt0 > cnt_cand:
candidate, cnt_cand = entity_type0, cnt0
linked_entity_type = candidate
if linked_entity_type is None:
linked_entity_type = list(entity_types)[0]
self._link_record(surface0, linked_entity_type[0])
return linked_entity_type
def mention2entity(self, mention):
        '''
        Find the entity corresponding to a single mention
        :param mention: the mention (surface form)
        :return: (entity, type) if a corresponding entity exists, otherwise (None, None)
        '''
for l in range(len(mention)-1):
r, entity_types = self.dig_trie(mention, l)
if r != -1 and r<=len(mention):
                surface0 = mention[0:r]  # surface form
(entity0, type0) = self.choose_from(surface0, entity_types)
return entity0, type0
return None, None
    def get_pinyin_correct_candidates(self, word):  # by default tolerate at most one pinyin change
pinyins = lazy_pinyin(word)
tmp = pinyins[:]
pinyin_cands = {tuple(pinyins)}
for i, pinyin in enumerate(pinyins):
if pinyin in self.pinyin_adjlist:
pinyin_cands |= {tuple(tmp[:i] + [neibr] + tmp[i + 1:]) for neibr in self.pinyin_adjlist[pinyin]}
pinyin_cands = pinyin_cands & set(self.pinyin_mention_dict.keys())
mention_cands = set()
for pinyin in pinyin_cands:
mention_cands |= self.pinyin_mention_dict[pinyin]
return list(mention_cands)
def choose_from_multi_mentions(self,mention_cands,sent=""):
surface0 = mention_cands[0]
entity0, type0 = self.mention2entity(surface0)
self._link_record(surface0, entity0)
return entity0, type0
def _entity_recheck(self, sent, entities_info, pinyin_recheck, char_recheck):
sent2 = self.decoref(sent, entities_info)
for word, flag in pseg.cut(sent2):
if flag.startswith("n"): # 对于名词,再检查是否有误差范围内匹配的其他指称
entity0, type0 = None, None
mention_cands = []
if pinyin_recheck:
mention_cands += self.get_pinyin_correct_candidates(word)
if char_recheck:
mention_cands += self.search_word_trie(word)
if len(mention_cands) > 0:
entity0, type0 = self.choose_from_multi_mentions(mention_cands, sent)
if entity0:
l = sent.find(word)
entities_info.append([(l,l+len(word)),(entity0, type0)])
def _entity_linking(self, sent, pinyin_recheck=False, char_recheck=False, keep_all=False):
entities_info = []
l = 0
while l < len(sent):
r, entity_types = self.dig_trie(sent, l)
if r != -1 and r <= len(sent):
                surface0 = sent[l:r]  # surface form
if not keep_all:
entity_type0 = self.choose_from(surface0, entity_types)
if "freq" in self.linking_strategy: # 处理重叠消歧,目前只有freq策略能够做到
overlap_surface_entity_with_pos = {} # 获得每个待链接字面值的“唯一”映射
overlap_surface_entity_with_pos[surface0] = ([l, r], entity_type0)
for ll in range(l + 1, r):
rr, entity_types_2 = self.dig_trie(sent, ll)
if rr != -1 and rr <= len(sent):
                                surface0_2 = sent[ll:rr]  # surface form
entity_type0_2 = self.choose_from(surface0_2, entity_types_2)
overlap_surface_entity_with_pos[surface0_2] = ([ll, rr], entity_type0_2)
                        # then compare these mappings by frequency
candidate, cnt_cand = None, 0
for i, ([ll, rr], entity_type00) in enumerate(overlap_surface_entity_with_pos.values()):
entity00, cnt0 = entity_type00[0], 0
if entity00 in self.entity_count:
cnt0 = self.entity_count[entity00]
if i == 0 or cnt0 > cnt_cand:
candidate, cnt_cand = ([ll, rr], entity_type00), cnt0
entities_info.append(candidate)
l = candidate[0][1]
else:
                        entities_info.append(([l, r], entity_type0))  # the trie yields the entity span for the key; the choice itself could still be refined using history etc.
l = r
else:
                    entities_info.append(([l, r], entity_types))  # the trie yields the entity span for the key; the choice itself could still be refined using history etc.
l = r
else:
l += 1
return entities_info
def entity_linking(self, sent, pinyin_recheck=False, char_recheck=False, keep_all=False, with_ch_pos=False):
"""
        :param sent: the sentence / text
        :param pinyin_recheck: do pinyin error check to cover more possible candidates
        :param char_recheck: do character error check to cover more possible candidates
        :param keep_all: if True, keep all the possibilities of linked entities
        :param with_ch_pos: if True, also returns ch_pos
        :return: entities_info: list of linked-entity records.
            if not keep_all: [([l, r], (entity, type)) for each linked mention m]
            else: [( [l, r], set((entity, type) for each possible entity of m) ) for each linked mention m]
            ch_pos: POS tag of the word each character belongs to (registered entities are ignored; can be used to
            filter entities, e.g. dropping entities made up entirely of nouns, which may be wrong links)
"""
self.check_prepared()
entities_info = self._entity_linking(sent, pinyin_recheck, char_recheck, keep_all)
if (not keep_all) and (pinyin_recheck or char_recheck):
self._entity_recheck(sent, entities_info, pinyin_recheck, char_recheck)
if with_ch_pos:
ch_pos = []
for word, pos in pseg.cut(sent):
ch_pos.extend([pos] * len(word))
return entities_info, ch_pos
else:
return entities_info
def get_linking_mention_candidates(self, sent, pinyin_recheck=False, char_recheck=False):
mention_cands = defaultdict(list)
cut_result = []
self.check_prepared()
entities_info = self._entity_linking(sent, pinyin_recheck, char_recheck)
sent2 = self.decoref(sent, entities_info)
l = 0
i = 0
for word, flag in pseg.cut(sent2):
if word in self.entity_types:
                word = entities_info[i][1][0]  # use the linked entity
i += 1
cut_result.append(word)
if flag.startswith("n"): # 对于名词,再检查是否有误差范围内匹配的其他指称
cands = []
if pinyin_recheck:
cands += self.get_pinyin_correct_candidates(word)
if char_recheck:
cands += self.search_word_trie(word)
if len(cands) > 0:
mention_cands[(l, l + len(word))] = set(cands)
l += len(word)
sent2 = "".join(cut_result)
return sent2, mention_cands
def decoref(self, sent, entities_info):
left = 0
processed_text = ""
for (beg, end), (entity, e_type) in entities_info:
# processed_text += sent[left:beg] + entity
processed_text += sent[left:beg] + e_type
left = end
processed_text += sent[left:]
return processed_text
def posseg(self, sent, standard_name=False, stopwords=None):
self.standard_name = standard_name
entities_info = self.entity_linking(sent)
sent2 = self.decoref(sent, entities_info)
result = []
i = 0
for word, flag in pseg.cut(sent2):
if word in self.entity_types:
if self.standard_name:
                    word = entities_info[i][1][0]  # use the linked entity
                else:
                    l, r = entities_info[i][0]  # or keep the original text
word = sent[l:r]
flag = entities_info[i][1][1][1:-1]
i += 1
else:
if stopwords and word in stopwords:
continue
result.append((word, flag))
return result
def seg(self, sent, standard_name=False, stopwords=None, return_sent=False):
self.standard_name = standard_name
entities_info = self.entity_linking(sent)
sent2 = self.decoref(sent, entities_info)
result = []
i = 0
for word in jieba.cut(sent2):
if word in self.entity_types:
if self.standard_name:
                    word = entities_info[i][1][0]  # use the linked entity
                else:
                    l, r = entities_info[i][0]  # or keep the original text
word = sent[l:r]
i += 1
else:
if stopwords and word in stopwords:
continue
result.append(word)
if return_sent:
return " ".join(result)
else:
return result
    def cut_sentences(self, para, drop_empty_line = True):  # sentence splitting
        para = re.sub('([。!?\?!])([^”’])', r"\1\n\2", para)  # single-character sentence terminators
        para = re.sub('(\.{6})([^”’])', r"\1\n\2", para)  # English ellipsis
        para = re.sub('(\…{2})([^”’])', r"\1\n\2", para)  # Chinese ellipsis
        para = re.sub('([。!?\?!][”’])([^,。!?\?])', r'\1\n\2', para)
        # if a closing quote is preceded by a terminator, the quote itself ends the sentence, so the \n is put
        # after the quote; note that the rules above are careful to keep the closing quotes in place
        para = para.rstrip()  # drop any extra trailing \n at the end of the paragraph
        # many rule sets also handle the semicolon ;, but it is ignored here, as are dashes, English double
        # quotes, etc.; adjust with a few simple changes if needed
sentences = para.split("\n")
if drop_empty_line:
sentences = [sent for sent in sentences if len(sent.strip()) > 0]
return sentences
def clean_text(self, text, remove_url=True, email=True, weibo_at=True, stop_terms=("转发微博",),
emoji=True, weibo_topic=False, deduplicate_space=True,
norm_url=False, norm_html=False, to_url=False):
"""
进行各种文本清洗操作,微博中的特殊格式,网址,email,等等
:param text: 输入文本
:param remove_url: (默认使用)是否去除网址
:param email: (默认使用)是否去除email
:param weibo_at: (默认使用)是否去除微博的@相关文本
:param stop_terms: 去除文本中的一些特定词语,默认参数为("转发微博",)
:param emoji: (默认使用)去除[]包围的文本,一般是表情符号
:param weibo_topic: (默认不使用)去除##包围的文本,一般是微博话题
:param deduplicate_space:(默认使用)合并文本中间的多个空格为一个
:param norm_url: (默认不使用)还原URL中的特殊字符为普通格式,如(%20转为空格)
:param norm_html: (默认不使用)还原HTML中的特殊字符为普通格式,如( 转为空格)
:param to_url: (默认不使用)将普通格式的字符转为还原URL中的特殊字符,用于请求,如(空格转为%20)
:return: 清洗后的文本
"""
        # mutually contradictory settings
        if norm_url and to_url:
            raise Exception("norm_url and to_url are contradictory settings")
if norm_html:
text = html.unescape(text)
if to_url:
text = urllib.parse.quote(text)
if remove_url:
URL_REGEX = re.compile(
r'(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:\'".,<>?«»“”‘’]))',
re.IGNORECASE)
text = re.sub(URL_REGEX, "", text)
if norm_url:
text = urllib.parse.unquote(text)
if email:
EMAIL_REGEX = re.compile(r"[-a-z0-9_.]+@(?:[-a-z0-9]+\.)+[a-z]{2,6}", re.IGNORECASE)
text = re.sub(EMAIL_REGEX, "", text)
if weibo_at:
text = re.sub(r"(回复)?(//)?\s*@\S*?\s*(:| |$)", " ", text) # 去除正文中的@和回复/转发中的用户名
if emoji:
text = re.sub(r"\[\S+\]", "", text) # 去除表情符号
if weibo_topic:
text = re.sub(r"#\S+#", "", text) # 去除话题内容
if deduplicate_space:
text = re.sub(r"\s+", " ", text) # 合并正文中过多的空格
assert hasattr(stop_terms, "__init__"), Exception("去除的词语必须是一个可迭代对象")
if type(stop_terms) == str:
text = text.replace(stop_terms, "")
else:
for x in stop_terms:
text = text.replace(x, "")
return text.strip()
def named_entity_recognition(self, sent, standard_name=False):
"""
利用pyhanlp的命名实体识别,找到句子中的(人名,地名,机构名)三种实体。harvesttext会预先链接已知实体
:param sent:
:param standard_name:
:return: 发现的命名实体信息,字典 {实体名: 实体类型}
"""
from pyhanlp import HanLP, JClass
if not self.hanlp_prepared:
self.hanlp_prepare()
self.standard_name = standard_name
entities_info = self.entity_linking(sent)
sent2 = self.decoref(sent, entities_info)
StandardTokenizer = JClass("com.hankcs.hanlp.tokenizer.StandardTokenizer")
StandardTokenizer.SEGMENT.enableAllNamedEntityRecognize(True)
entity_type_dict = {}
try:
for x in StandardTokenizer.segment(sent2):
                # the three prefixes stand for: person (nr), place (ns), organization (nt)
tag0 = str(x.nature)
if tag0.startswith("nr"):
entity_type_dict[x.word] = "人名"
elif tag0.startswith("ns"):
entity_type_dict[x.word] = "地名"
elif tag0.startswith("nt"):
entity_type_dict[x.word] = "机构名"
elif tag0.startswith("nz"):
entity_type_dict[x.word] = "其他专名"
except:
pass
return entity_type_dict
def dependency_parse(self, sent, standard_name=False, stopwords=None):
"""
依存句法分析,调用pyhanlp的接口,并且融入了harvesttext的实体识别机制。
不保证高准确率。
:param sent:
:param standard_name:
:param stopwords:
:return: arcs:依存弧,列表中的列表。
[[词语id,词语字面值或实体名(standard_name控制),词性,依存关系,依存子词语id] for 每个词语]
"""
from pyhanlp import HanLP, JClass
if not self.hanlp_prepared:
self.hanlp_prepare()
self.standard_name = standard_name
entities_info = self.entity_linking(sent)
sent2 = self.decoref(sent, entities_info)
# [word.ID-1, word.LEMMA, word.POSTAG, word.DEPREL ,word.HEAD.ID-1]
arcs = []
i = 0
sentence = HanLP.parseDependency(sent2)
for word in sentence.iterator():
word0, tag0 = word.LEMMA, word.POSTAG
if stopwords and word0 in stopwords:
continue
if word0 in self.entity_types:
if self.standard_name:
                    word0 = entities_info[i][1][0]  # use the linked entity
                else:
                    l, r = entities_info[i][0]  # or keep the original text
word0 = sent[l:r]
tag0 = entities_info[i][1][1][1:-1]
i += 1
arcs.append([word.ID-1, word0, tag0, word.DEPREL, word.HEAD.ID-1])
return arcs
def triple_extraction(self, sent, standard_name=False, stopwords=None, expand = "all"):
"""
利用主谓宾等依存句法关系,找到句子中有意义的三元组。
很多代码参考:https://github.com/liuhuanyong/EventTriplesExtraction
不保证高准确率。
:param sent:
:param standard_name:
:param stopwords:
:param expand: 默认"all":扩展所有主谓词,"exclude_entity":不扩展已知实体,可以保留标准的实体名,用于链接。"None":不扩展
:return:
"""
arcs = self.dependency_parse(sent, standard_name, stopwords)
        '''Expand the extracted subject or object words'''
def complete_e(words, postags, child_dict_list, word_index):
if expand == "all" or (expand == "exclude_entity" and "#"+postags[word_index]+"#" not in self.entity_types):
child_dict = child_dict_list[word_index]
prefix = ''
if '定中关系' in child_dict:
for i in range(len(child_dict['定中关系'])):
prefix += complete_e(words, postags, child_dict_list, child_dict['定中关系'][i])
postfix = ''
if postags[word_index] == 'v':
if '动宾关系' in child_dict:
postfix += complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
if '主谓关系' in child_dict:
prefix = complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0]) + prefix
return prefix + words[word_index] + postfix
elif expand == "None":
return words[word_index]
else: # (expand == "exclude_entity" and "#"+postags[word_index]+"#" in self.entity_types)
return words[word_index]
words, postags = ["" for i in range(len(arcs))], ["" for i in range(len(arcs))]
child_dict_list = [defaultdict(list) for i in range(len(arcs))]
for i, format_parse in enumerate(arcs):
id0, words[i], postags[i], rel, headID = format_parse
child_dict_list[headID][rel].append(i)
svos = []
for index in range(len(postags)):
            # extraction based on the dependency parse
if postags[index]:
                # extract fact triples centered on the predicate
child_dict = child_dict_list[index]
                # subject-verb-object
if '主谓关系' in child_dict and '动宾关系' in child_dict:
r = words[index]
e1 = complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0])
e2 = complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
svos.append([e1, r, e2])
                # postposed attributive, with a verb-object relation
relation = arcs[index][-2]
head = arcs[index][-1]
if relation == '定中关系':
if '动宾关系' in child_dict:
e1 = complete_e(words, postags, child_dict_list, head)
r = words[index]
e2 = complete_e(words, postags, child_dict_list, child_dict['动宾关系'][0])
temp_string = r + e2
if temp_string == e1[:len(temp_string)]:
e1 = e1[len(temp_string):]
if temp_string not in e1:
svos.append([e1, r, e2])
                # subject-verb-complement pattern containing a preposition-object relation
if '主谓关系' in child_dict and '动补结构' in child_dict:
e1 = complete_e(words, postags, child_dict_list, child_dict['主谓关系'][0])
CMP_index = child_dict['动补结构'][0]
r = words[index] + words[CMP_index]
if '介宾关系' in child_dict_list[CMP_index]:
e2 = complete_e(words, postags, child_dict_list, child_dict_list[CMP_index]['介宾关系'][0])
svos.append([e1, r, e2])
return svos
def clear(self):
self.deprepare()
self.__init__()
#
    # New word discovery module
#
def word_discover(self, doc, threshold_seeds=[], auto_param=True,
                      excluding_types=[], excluding_words=[],  # entity types or specific words already registered can be excluded
max_word_len=5, min_freq=0.00005, min_entropy=1.4, min_aggregation=50,
ent_threshold="both", mem_saving=0):
        # use empirical parameters; the explicit parameter settings below are then ignored
        if auto_param:  # parameter estimates based on a few of my own experiments; not very scientific, but should give reasonable results
length = len(doc)
min_entropy = np.log(length) / 10
min_freq = min(0.00005, 20.0 / length)
min_aggregation = np.sqrt(length) / 15
mem_saving = int(length > 300000)
        # ent_threshold: "both" requires the entropy threshold on both the left and the right, "avg" only on their average
        # when every sentence is very short (e.g. length < 8), words that often sit at the boundaries may be hard to confirm; in that case ent_threshold="avg" is recommended
try:
ws = WordDiscoverer(doc, max_word_len, min_freq, min_entropy, min_aggregation, ent_threshold, mem_saving)
except Exception as e:
logging.log(logging.ERROR, str(e))
info = {"text": [], "freq": [], "left_ent": [], "right_ent": [], "agg": []}
info = pd.DataFrame(info)
info = info.set_index("text")
return info
if len(excluding_types) > 0:
if "#" in list(excluding_types)[0]: # 化为无‘#’标签
excluding_types = [x[1:-1] for x in excluding_types]
ex_mentions = [x for enty in self.entity_mention_dict
if enty in self.entity_type_dict and
self.entity_type_dict[enty] in excluding_types
for x in self.entity_mention_dict[enty]]
else:
ex_mentions = []
ex_mentions += excluding_words
info = ws.get_df_info(ex_mentions)
        # use the seed words to set the quality threshold for selecting good new words: the lowest-quality seed word is kept (if it was found in the first place)
if len(threshold_seeds) > 0:
min_score = 100000
for seed in threshold_seeds:
if seed in info.index:
min_score = min(min_score, info.loc[seed, "score"])
if (min_score >= 100000):
min_score = 0
else:
                min_score *= 0.9  # leave a bit of slack
info = info[info["score"] > min_score]
return info
def add_new_words(self, new_words):
for word in new_words:
self.build_trie(word, word, "新词")
self.entity_mention_dict[word] = set([word])
self.entity_type_dict[word] = "新词"
if word not in self.type_entity_mention_dict["新词"]:
self.type_entity_mention_dict["新词"][word] = set([word])
else:
self.type_entity_mention_dict["新词"][word].add(word)
self.check_prepared()
    def add_new_mentions(self, entity_mention_dict):  # add new aliases linked to existing entities, usually selected from new word discovery results
for entity0 in entity_mention_dict:
type0 = self.entity_type_dict[entity0]
for mention0 in entity_mention_dict[entity0]:
self.entity_mention_dict[entity0].add(mention0)
self.build_trie(mention0, entity0, type0)
self.type_entity_mention_dict[type0][entity0] = self.entity_mention_dict[entity0]
self.check_prepared()
def add_new_entity(self, entity0, mention0=None, type0="添加词"):
if mention0 is None:
mention0 = entity0
self.entity_type_dict[entity0] = type0
if entity0 in self.entity_mention_dict:
self.entity_mention_dict[entity0].add(mention0)
else:
self.entity_mention_dict[entity0] = set([mention0])
self.build_trie(mention0, entity0, type0)
if entity0 not in self.type_entity_mention_dict[type0]:
self.type_entity_mention_dict[type0][entity0] = set([mention0])
else:
self.type_entity_mention_dict[type0][entity0].add(mention0)
self.check_prepared()
def find_entity_with_rule(self, text, rulesets=[], add_to_dict=True, type0="添加词"):
        '''
        Use rules to pick entities out of the segmented words of a text, optionally assign them a type
        and add them to the entity library.
        :param text: string, a piece of text
        :param rulesets: list of (tuple of rules or single rule) from match_patterns;
            the list contains several rules, and a word satisfying any one of them is taken to belong to this type,
            while each rule is a tuple or a single condition (pattern) that the word must satisfy (all of them, for a tuple).
        :param add_to_dict: whether to add the words found directly to the dictionary
        :param type0: the entity type assigned to the matching words; only meaningful when add_to_dict is set
        :return: found_entities
        '''
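        # Illustrative usage (editorial; any callable taking a word and returning a bool works as a pattern):
        #   ht.find_entity_with_rule(text, rulesets=[lambda w: w.endswith("公司")], type0="机构名")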
found_entities = set()
for word in self.seg(text):
            for ruleset in rulesets:  # rulesets are OR-ed: once one of them is satisfied, add the word and skip the rest
toAdd = True
if type(ruleset) == type((1, 2)): # tuple
for pattern0 in ruleset:
if not pattern0(word):
toAdd = False
break
else: # single rule
pattern0 = ruleset
if not pattern0(word):
toAdd = False
if toAdd:
found_entities.add(word)
break
if add_to_dict:
for entity0 in found_entities:
self.add_new_entity(entity0, entity0, type0)
self.prepare()
return found_entities
#
    # Sentiment analysis module
#
def build_sent_dict(self, sents, method="PMI", min_times=5, scale="None",
pos_seeds=None, neg_seeds=None, stopwords=None):
        '''
        Build a sentiment dictionary from seed words.
        :param sents: list of string, the texts
        :param method: "PMI", the algorithm to use; currently only PMI is supported
        :param min_times: int, default 5; words occurring fewer times than this across all sentences are filtered out
        :param scale: {"None", "0-1", "+-1"}, default "None"; otherwise the sentiment values are rescaled:
            "0-1": linearly rescale so the maximum is 1 and the minimum is 0 (0.5 is not necessarily neutral)
            "+-1": rescale the positive and negative ranges separately, keeping 0 as the neutral value
        :param pos_seeds: list of string, positive seed words; defaults to the Tsinghua sentiment dictionary if omitted
        :param neg_seeds: list of string, negative seed words; defaults to the Tsinghua sentiment dictionary if omitted
        :param stopwords: list of string, stopwords; not used if omitted
        :return: sent_dict: the sentiment dictionary; can be queried like a dict for the sentiment value of a word
        '''
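        # Illustrative usage (editorial; variable names are hypothetical):
        #   sent_dict = ht.build_sent_dict(sentences, min_times=3, scale="+-1")
        #   sent_dict["开心"]  ->  a score in (0, 1] if the corpus associates the word with the positive seeds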
if pos_seeds is None or neg_seeds is None:
sdict = get_qh_sent_dict()
pos_seeds, neg_seeds = sdict["pos"], sdict["neg"]
docs = [set(self.seg(sent)) for sent in sents]
if not stopwords is None:
stopwords = set(stopwords)
for i in range(len(docs)):
docs[i] = docs[i] - stopwords
docs = list(filter(lambda x: len(x) > 0, docs))
self.sent_dict = SentDict(docs, method, min_times, scale, pos_seeds, neg_seeds)
return self.sent_dict
def analyse_sent(self, sent):
return self.sent_dict.analyse_sent(self.seg(sent))
#
    # Entity retrieval module
#
def build_index(self, docs, with_entity=True, with_type=True):
inv_index = defaultdict(set)
for i, sent in enumerate(docs):
entities_info = self.entity_linking(sent)
for span, (entity, type0) in entities_info:
if with_entity:
inv_index[entity].add(i)
if with_type:
inv_index[type0].add(i)
return inv_index
def get_entity_counts(self, docs, inv_index, used_type=[]):
if len(used_type) > 0:
entities = iter(x for x in self.entity_type_dict
if self.entity_type_dict[x] in used_type)
else:
entities = self.entity_type_dict.keys()
cnt = {enty: len(inv_index[enty]) for enty in entities if enty in inv_index}
return cnt
def search_entity(self, query, docs, inv_index):
words = query.split()
if len(words) > 0:
ids = inv_index[words[0]]
for word in words[1:]:
ids = ids & inv_index[word]
np_docs = np.array(docs)[list(ids)]
return np_docs.tolist()
else:
return []
#
    # Text summarization module
#
def get_summary(self, docs, topK=5, stopwords=None, with_importance=False, standard_name=True):
import networkx as nx
def sent_sim1(words1, words2):
if len(words1) <= 1 or len(words2) <= 1:
return 0.0
return (len(set(words1) & set(words2))) / (np.log2(len(words1)) + np.log2(len(words2)))
        # with standard_name, similarity is computed on the entity-linking results and is therefore more accurate
sents = [self.seg(doc.strip(), standard_name=standard_name, stopwords=stopwords) for doc in docs]
sents = [sent for sent in sents if len(sent) > 0]
G = nx.Graph()
for u, v in combinations(range(len(sents)), 2):
G.add_edge(u, v, weight=sent_sim1(sents[u], sents[v]))
pr = nx.pagerank_scipy(G)
pr_sorted = sorted(pr.items(), key=lambda x: x[1], reverse=True)
if with_importance:
return [(docs[i], imp) for i, imp in pr_sorted[:topK]]
else:
return [docs[i] for i, rank in pr_sorted[:topK]]
#
    # Entity graph module
#
def build_entity_graph(self, docs, min_freq=0, inv_index={}, used_types=[]):
import networkx as nx
G = nx.Graph()
links = {}
if len(inv_index) == 0:
for i, sent in enumerate(docs):
entities_info = self.entity_linking(sent)
if len(used_types) == 0:
entities = set(entity for span, (entity, type0) in entities_info)
else:
entities = set(entity for span, (entity, type0) in entities_info if type0[1:-1] in used_types)
for u, v in combinations(entities, 2):
pair0 = tuple(sorted((u, v)))
if pair0 not in links:
links[pair0] = 1
else:
links[pair0] += 1
        else:  # an inverted index is already available, so retrieval is faster
if len(used_types) == 0:
entities = self.entity_type_dict.keys()
else:
entities = iter(entity for (entity, type0) in self.entity_type_dict.items() if type0 in used_types)
for u, v in combinations(entities, 2):
pair0 = tuple(sorted((u, v)))
ids = inv_index[u] & inv_index[v]
if len(ids) > 0:
links[pair0] = len(ids)
for (u, v) in links:
if links[(u, v)] >= min_freq:
G.add_edge(u, v, weight=links[(u, v)])
self.entity_graph = G
return G
def build_word_ego_graph(self, docs, word, standard_name=True, min_freq=0, other_min_freq=-1, stopwords=None):
        '''
        Build an ego graph of word relations centered on a given keyword, from the given texts.
        The keyword can denote a particular aspect (e.g. in documents about daily life such as food or housing),
        so the ego graph gives a concise view of that aspect.
        :param docs: list of texts
        :param word: the center word
        :param standard_name: map all entity mentions to their standard entity names
        :param stopwords: stopwords to filter out
        :param min_freq: minimum co-occurrence count with the center word for an edge to be added, used to limit the number of edges
        :param other_min_freq: minimum co-occurrence count for relations not involving the center word
        :return: G (a networkx Graph)
        '''
import networkx as nx
G = nx.Graph()
links = {}
if other_min_freq == -1:
other_min_freq = min_freq
for doc in docs:
if stopwords:
words = set(x for x in self.seg(doc, standard_name=standard_name) if x not in stopwords)
else:
words = self.seg(doc, standard_name=standard_name)
if word in words:
for u, v in combinations(words, 2):
pair0 = tuple(sorted((u, v)))
if pair0 not in links:
links[pair0] = 1
else:
links[pair0] += 1
        used_nodes = set([word])  # words in the retained pairs must be related to the center word (co-occurrence >= min_freq)
for (u, v) in links:
w = links[(u, v)]
if word in (u, v) and w >= min_freq:
used_nodes.add(v if word == u else u)
G.add_edge(u, v, weight=w)
elif w >= other_min_freq:
G.add_edge(u, v, weight=w)
G = G.subgraph(used_nodes).copy()
return G
def build_entity_ego_graph(self, docs, word, min_freq=0, other_min_freq=-1, inv_index={}, used_types=[]):
'''
Entity only version of build_word_ego_graph()
'''
import networkx as nx
G = nx.Graph()
links = {}
if other_min_freq == -1:
other_min_freq = min_freq
if len(inv_index) != 0:
related_docs = self.search_entity(word, docs, inv_index)
else:
related_docs = []
for doc in docs:
entities_info = self.entity_linking(doc)
entities = [entity0 for [[l,r], (entity0,type0)] in entities_info]
if word in entities:
related_docs.append(doc)
for i, sent in enumerate(related_docs):
entities_info = self.entity_linking(sent)
if len(used_types) == 0:
entities = set(entity for span, (entity, type0) in entities_info)
else:
entities = set(entity for span, (entity, type0) in entities_info if type0[1:-1] in used_types)
for u, v in combinations(entities, 2):
pair0 = tuple(sorted((u, v)))
if pair0 not in links:
links[pair0] = 1
else:
links[pair0] += 1
        used_nodes = set([word])  # entities in the retained pairs must be related to the center entity (co-occurrence >= min_freq)
for (u, v) in links:
w = links[(u, v)]
if word in (u, v) and w >= min_freq:
used_nodes.add(v if word == u else u)
G.add_edge(u, v, weight=w)
elif w >= other_min_freq:
G.add_edge(u, v, weight=w)
G = G.subgraph(used_nodes).copy()
return G
| {"hexsha": "142d312d20d6e08e4ea2f35c9ee1103b070ef548", "size": 44553, "ext": "py", "lang": "Python", "max_stars_repo_path": "harvesttext/harvesttext.py", "max_stars_repo_name": "wainshine/HarvestText", "max_stars_repo_head_hexsha": "391aed784d27b182cbf4e2e25d8526ee48f23377", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-31T01:19:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-31T01:19:31.000Z", "max_issues_repo_path": "harvesttext/harvesttext.py", "max_issues_repo_name": "BoChen-Daniel/HarvestText", "max_issues_repo_head_hexsha": "bc32dfed5b6d5123a9f4b065fb37089d21c979da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "harvesttext/harvesttext.py", "max_forks_repo_name": "BoChen-Daniel/HarvestText", "max_forks_repo_head_hexsha": "bc32dfed5b6d5123a9f4b065fb37089d21c979da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2019-12-27T17:49:38.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-13T08:02:42.000Z", "avg_line_length": 42.391056137, "max_line_length": 202, "alphanum_fraction": 0.5521064799, "include": true, "reason": "import numpy,import networkx", "num_tokens": 12853} |
import re
import numpy as np
import pytest
from ctc_decoder import BKTree
from ctc_decoder import LanguageModel
from ctc_decoder import beam_search
from ctc_decoder import best_path
from ctc_decoder import lexicon_search
from ctc_decoder import loss
from ctc_decoder import prefix_search_heuristic_split
from ctc_decoder import probability
from ctc_decoder import token_passing
def softmax(mat):
maxT, _ = mat.shape # dim0=t, dim1=c
res = np.zeros(mat.shape)
for t in range(maxT):
y = mat[t, :]
e = np.exp(y)
s = np.sum(e)
res[t, :] = e / s
return res
def load_rnn_output(fn):
return np.genfromtxt(fn, delimiter=';')[:, : -1]
@pytest.fixture
def line_mat():
return softmax(load_rnn_output('../data/line/rnnOutput.csv'))
@pytest.fixture
def word_mat():
return softmax(load_rnn_output('../data/word/rnnOutput.csv'))
@pytest.fixture
def chars():
return ' !"#&\'()*+,-./0123456789:;?ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
@pytest.fixture
def corpus():
with open('../data/line/corpus.txt') as f:
txt = f.read()
return txt
@pytest.fixture
def words():
with open('../data/word/corpus.txt') as f:
words = f.read().split()
return words
def test_line_example_best_path(line_mat, chars):
mat = line_mat
assert best_path(mat, chars) == 'the fak friend of the fomly hae tC'
def test_line_example_prefix_search_heuristic_split(line_mat, chars):
mat = line_mat
assert prefix_search_heuristic_split(mat, chars) == 'the fak friend of the fomcly hae tC'
def test_line_example_beam_search(line_mat, chars):
mat = line_mat
assert beam_search(mat, chars) == 'the fak friend of the fomcly hae tC'
def test_line_example_beam_search_with_language_model(line_mat, chars, corpus):
mat = line_mat
# create language model from text corpus
lm = LanguageModel(corpus, chars)
assert beam_search(mat, chars, lm=lm) == 'the fake friend of the family, lie th'
def test_line_example_token_passing(line_mat, chars, corpus):
mat = line_mat
# create language model from text corpus
words = re.findall(r'\w+', corpus)
assert token_passing(mat, chars, words) == 'the fake friend of the family fake the'
def test_line_example_loss_and_probability(line_mat, chars):
mat = line_mat
gt = 'the fake friend of the family, like the'
assert np.isclose(probability(mat, gt, chars), 6.31472642886565e-13)
assert np.isclose(loss(mat, gt, chars), 28.090721774903226)
def test_word_example_best_path(word_mat, chars, words):
mat = word_mat
assert best_path(mat, chars) == 'aircrapt'
def test_word_example_lexicon_search(word_mat, chars, words):
mat = word_mat
# create BK tree from list of words
bk_tree = BKTree(words)
assert lexicon_search(mat, chars, bk_tree, tolerance=4) == 'aircraft'
| {"hexsha": "69bfcc49dd3e50caca02368b75b9ff9469038340", "size": 2884, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_real_example.py", "max_stars_repo_name": "TenaciousC22/CTCDecoder", "max_stars_repo_head_hexsha": "8a52aa0557f41bb8674643f0a5ae8071224aec79", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 705, "max_stars_repo_stars_event_min_datetime": "2017-10-10T15:58:36.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T07:25:50.000Z", "max_issues_repo_path": "tests/test_real_example.py", "max_issues_repo_name": "TenaciousC22/CTCDecoder", "max_issues_repo_head_hexsha": "8a52aa0557f41bb8674643f0a5ae8071224aec79", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 24, "max_issues_repo_issues_event_min_datetime": "2017-11-01T15:31:19.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-20T11:52:44.000Z", "max_forks_repo_path": "tests/test_real_example.py", "max_forks_repo_name": "TenaciousC22/CTCDecoder", "max_forks_repo_head_hexsha": "8a52aa0557f41bb8674643f0a5ae8071224aec79", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 172, "max_forks_repo_forks_event_min_datetime": "2017-11-20T20:01:01.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-31T08:15:31.000Z", "avg_line_length": 25.298245614, "max_line_length": 93, "alphanum_fraction": 0.7111650485, "include": true, "reason": "import numpy", "num_tokens": 743} |
# # Variational problems
#
# In this example, we will numerically simulate an entropy-regularised Wasserstein gradient flow
# approximating the Fokker-Planck and porous medium equations.
#
# The connection between Wasserstein gradient flows and (non)-linear PDEs is due to Jordan, Kinderlehrer and Otto [^JKO98], and
# an easy-to-read overview of the topic is provided in Section 9.3 [^PC19]
#
# [^JKO98]: Jordan, Richard, David Kinderlehrer, and Felix Otto. "The variational formulation of the Fokker--Planck equation." SIAM journal on mathematical analysis 29.1 (1998): 1-17.
# [^PC19]: Peyré, Gabriel, and Marco Cuturi. "Computational optimal transport: With applications to data science." Foundations and Trends® in Machine Learning 11.5-6 (2019): 355-607.
#
# ## Fokker-Planck equation as a $W_2$ gradient flow
# For a potential function $\Psi$ and noise level $\sigma^2$, the Fokker-Planck equation (FPE) is
# ```math
# \partial_t \rho_t = \nabla \cdot (\rho_t \nabla \Psi) + \frac{\sigma^2}{2} \Delta \rho_t,
# ```
# and we take no-flux (Neumann) boundary conditions.
#
# This describes the evolution of a massless particle undergoing both diffusion (with diffusivity $\sigma^2$) and drift (along potential $\Psi$) according to the stochastic differential equation
# ```math
# dX_t = -\nabla \Psi(X_t) dt + \sigma dB_t.
# ```
# The result of Jordan, Kinderlehrer and Otto (commonly referred to as the JKO theorem) states that
# $\rho_t$ evolves following the 2-Wasserstein gradient flow of the Gibbs free energy functional
# ```math
# F(\rho) = \int \Psi d\rho + \int \log(\rho) d\rho.
# ```
#
# ## Implicit schemes for gradient flows
# In an Euclidean space, the gradient flow of a functional $F$ is simply the solution of an ordinary differential equation
# ```math
# \dfrac{dx(t)}{dt} = -\nabla F(x(t)).
# ```
# Of course, there is a requirement that $F$ is smooth. A more general formulation of a gradient flow that allows
# $F$ to be non-smooth is the implicit scheme
# ```math
# x_{t+\tau} = \operatorname{argmin}_x \frac{1}{2} \| x - x_t \|_2^2 + \tau F(x).
# ```
# As the timestep $\tau$ shrinks, $x_t$ becomes a better and better approximation to the gradient flow of $F$.
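#
# As a small editorial aside (not part of the original derivation), for the quadratic choice
# $F(x) = \tfrac{1}{2}x^2$ the implicit step can be solved in closed form: minimising
# $\tfrac{1}{2}(x - x_t)^2 + \tau \tfrac{1}{2}x^2$ gives $x_{t+\tau} = x_t/(1+\tau)$.
implicit_step_quadratic(xt, τ) = xt / (1 + τ)
# Iterating this map approximates the gradient flow $\dot{x}(t) = -x(t)$, i.e. exponential decay.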
#
# ## Wasserstein gradient flow
# In the context of the JKO theorem, we seek $\rho_t$ that is the gradient flow of $F$ with
# respect to the 2-Wasserstein distance. This can be achieved by choosing the $W_2$ metric in the implicit step:
# ```math
# \rho_{t + \tau} = \operatorname{argmin}_{\rho} d_{W_2}^2(\rho_{t}, \rho) + \tau F(\rho).
# ```
# Finally, a numerical scheme for computing this gradient flow can be developed by using the entropic regularisation
# of optimal transport on a discretised domain
# ```math
# \rho_{t + \tau} = \operatorname{argmin}_{\rho} \operatorname{OT}_\varepsilon(\rho_{t}, \rho) + \tau F(\rho),
# ```
# where
# ```math
# \operatorname{OT}_\varepsilon(\alpha, \beta) = \min_{\gamma \in \Pi(\alpha, \beta)} \sum_{i,j} \frac{1}{2} \| x_i - x_j \|_2^2 \gamma_{ij} + \varepsilon \sum_{i, j} \gamma_{ij} \log(\gamma_{ij}).
# ```
# Each step of this problem is a minimisation problem with respect to $\rho$.
# Since we use entropic optimal transport which is differentiable, this can be solved using gradient-based methods.
# ## Problem setup
#
using OptimalTransport
using Distances
using LogExpFunctions
using Optim
using Plots
using StatsBase
using ReverseDiff
using LinearAlgebra
using Logging
# Here, we set up the computational domain that we work on - we discretize the interval $[-1, 1]$.
# The natural boundary conditions to use will be Neumann (zero flux), see e.g. [^Santam2017]
#
# [^Santam2017]: Santambrogio, Filippo. "{Euclidean, metric, and Wasserstein} gradient flows: an overview." Bulletin of Mathematical Sciences 7.1 (2017): 87-154.
support = range(-1, 1; length=64)
C = pairwise(SqEuclidean(), support');
# Now we set up various functionals that we will use.
#
# We define the generalised entropy (Equation (4.4) of [^Peyre2015]) as follows. For $m = 1$ this is just the "regular" entropy, and $m = 2$ this is squared $L_2$.
#
# [^Peyre2015]: Peyré, Gabriel. "Entropic approximation of Wasserstein gradient flows." SIAM Journal on Imaging Sciences 8.4 (2015): 2323-2351.
function E(ρ; m=1)
if m == 1
return sum(xlogx.(ρ)) - sum(ρ)
elseif m > 1
return dot(ρ, @. (ρ^(m - 1) - m) / (m - 1))
end
end;
# Now define $\psi(x)$ to be a potential energy function that has two potential wells at $x = ± 0.5$.
ψ(x) = 10 * (x - 0.5)^2 * (x + 0.5)^2;
plot(support, ψ.(support); color="black", label="Scalar potential")
# Having defined $\psi$, this induces a potential energy functional $\Psi$ on probability distributions $\rho$:
# ```math
# \Psi(\rho) = \int \psi(x) \rho(x) dx = \langle \psi, \rho \rangle .
# ```
Ψ = ψ.(support);
# Define the time step $\tau$ and entropic regularisation level $\varepsilon$, and form the associated Gibbs kernel $K = e^{-C/\varepsilon}$.
τ = 0.05
ε = 0.01
K = @. exp(-C / ε);
# We define the (non-smooth) initial condition $\rho_0$ in terms of step functions.
H(x) = x > 0
ρ0 = @. H(support + 0.25) - H(support - 0.25)
ρ0 = ρ0 / sum(ρ0)
plot(support, ρ0; label="Initial condition ρ0", color="blue")
# `G_fpe` is the objective function for the implicit step scheme
# ```math
# G_\mathrm{fpe}(\rho) = \operatorname{OT}_\varepsilon(\rho_{t}, \rho) + \tau F(\rho),
# ```
# and we seek to minimise in $\rho$.
function G_fpe(ρ, ρ0, τ, ε, C)
return sinkhorn2(ρ, ρ0, C, ε; regularization=true, maxiter=250) + τ * (dot(Ψ, ρ) + E(ρ))
end;
# `step` solves the implicit step problem to produce $\rho_{t + \tau}$ from $\rho_t$.
function step(ρ0, τ, ε, C, G)
## only print error messages
obj = u -> G(softmax(u), ρ0, τ, ε, C)
opt = with_logger(SimpleLogger(stderr, Logging.Error)) do
optimize(
obj,
ones(size(ρ0)),
LBFGS(),
Optim.Options(; iterations=50, g_tol=1e-6);
autodiff=:forward,
)
end
return softmax(Optim.minimizer(opt))
end
# Now we simulate `N = 10` iterates of the gradient flow and plot the result.
N = 10
ρ = similar(ρ0, size(ρ0, 1), N)
ρ[:, 1] = ρ0
for i in 2:N
@info i
ρ[:, i] = step(ρ[:, i - 1], τ, ε, C, G_fpe)
end
colors = range(colorant"red"; stop=colorant"blue", length=N)
plot(
support,
ρ;
title=raw"$F(\rho) = \langle \psi, \rho \rangle + \langle \rho, \log(\rho) \rangle$",
palette=colors,
legend=nothing,
)
# ## Porous medium equation
#
# The porous medium equation (PME) is the nonlinear PDE
# ```math
# \partial_t \rho = \nabla \cdot (\rho \nabla \Psi) + \Delta \rho^m,
# ```
# again with Neumann boundary conditions. The value of $m$ in the PME corresponds to picking $m$ in the generalised entropy functional.
# Now, we will solve the PME with $m = 2$ as a Wasserstein gradient flow.
#
function G_pme(ρ, ρ0, τ, ε, C)
return sinkhorn2(ρ, ρ0, C, ε; regularization=true, maxiter=250) +
τ * (dot(Ψ, ρ) + E(ρ; m=2))
end;
# set up as previously
N = 10
ρ = similar(ρ0, size(ρ0, 1), N)
ρ[:, 1] = ρ0
for i in 2:N
ρ[:, i] = step(ρ[:, i - 1], τ, ε, C, G_pme)
end
plot(
support,
ρ;
title=raw"$F(\rho) = \langle \psi, \rho \rangle + \langle \rho, \rho - 1\rangle$",
palette=colors,
legend=nothing,
)
# ## Exploiting duality
#
# The previous examples solved the minimisation problem for the implicit gradient flow step directly, involving automatic differentiation through the Sinkhorn iterations used to compute $\operatorname{OT}_\varepsilon(\rho_t, \rho)$ each time a gradient needs to be evaluated.
# While this is straightforward to implement, it is computationally costly.
# An alternative approach for convex variational problems is to proceed via the [dual problem](https://en.wikipedia.org/wiki/Duality_(optimization)).
# The benefit of proceeding via the dual problem is that the part of the dual minimisation problem corresponding to the (entropy-regularised) optimal transport loss is typically available in closed form. This is in contrast to the primal problem, where evaluation of the objective and its gradients requires potentially many Sinkhorn iterations.
#
# Consider a general convex and unconstrained problem. Under (usually satisfied) conditions for strong duality to hold, we have
# ```math
# \begin{aligned}
# &\min_{\rho} \operatorname{OT}_{\varepsilon}(\rho_0, \rho) + \mathcal{F}(\rho) \\
# &= \min_{\rho} \sup_{u}\left[\langle \rho, u \rangle - \operatorname{OT}^*_{\varepsilon}(\rho_0, u)\right] + \mathcal{F}(\rho) \\
# &= \sup_{u} \min_{\rho} \langle \rho, u \rangle - \operatorname{OT}^*_{\varepsilon}(\rho_0, u) + \mathcal{F}(\rho) \\
# &= \sup_{u} - \operatorname{OT}^*_{\varepsilon}(\rho_0, u) + \min_{\rho} \langle \rho, u \rangle + \mathcal{F}(\rho) \\
# &= \sup_{u} - \operatorname{OT}^*_{\varepsilon}(\rho_0, u) - \sup_{\rho} \langle \rho, -u \rangle - \mathcal{F}(\rho) \\
# &= \sup_{u} - \operatorname{OT}^*_{\varepsilon}(\rho_0, u) - \mathcal{F}^*(-u).
# \end{aligned}
# ```
# Thus, the dual problem is
# ```math
# \min_{u} \operatorname{OT}^*_{\varepsilon}(\rho_0, u) + \mathcal{F}^*(-u).
# ```
#
# The upshot here is that $u \mapsto \operatorname{OT}^*_{\varepsilon}(\rho_0, u)$ and its gradient is available in closed form. This is a known result in the literature [^CP18].
#
# [^CP18]: Cuturi, Marco, and Gabriel Peyré. “Semi-Dual Regularized Optimal Transport.” ArXiv: Learning, 2018.
#
# The formulas we state below are lifted from statements in [^Z21].
#
# [^Z21]: Zhang, Stephen Y. “A Unified Framework for Non-Negative Matrix and Tensor Factorisations with a Smoothed Wasserstein Loss.” ArXiv: Machine Learning, 2021.
#
# ```math
# \begin{aligned}
# \operatorname{OT}^*_{\varepsilon}(\rho_0, u) &= -\varepsilon \left\langle \rho_0, \log\left( \dfrac{\rho_0}{K e^{u/\varepsilon}} \right) - 1\right\rangle, \\
# \nabla_u \operatorname{OT}^*_{\varepsilon}(\rho_0, u) &= K^\top \left( \dfrac{\rho_0}{K e^{u/\varepsilon}} \right) \odot e^{u/\varepsilon}.
# \end{aligned}
# ```
# At optimality, we can recover the primal optimal point $\rho^\star$ from the dual optimal point $u^\star$ following the formula
# ```math
# \rho^\star = e^{u^\star/\varepsilon} \odot K^\top \dfrac{\rho_0}{K e^{u^\star/\varepsilon}}.
# ```
#
# When $\mathcal{F}^*(\cdot)$ is also available in closed form (this is not always the case), the dual problem has a closed form objective and can generally be solved much more efficiently than the primal problem.
#
# In the setting of the Fokker-Planck and porous medium equations, the function $\mathcal{F}$ can be identified with
#
# ```math
# \mathcal{F}(\rho) = \tau \left[ \langle \psi, \rho \rangle + E_m(\rho) \right].
# ```
#
# A straightforward computation shows that
# ```math
# \mathcal{F}^*(u) = \tau E_m^*\left( \frac{u}{\tau}-\psi \right),
# ```
# where
# ```math
# E_m^*(u) = \begin{cases}
# \langle e^u, \mathbf{1} \rangle, & m = 1 \\
# \sum_i \left[ \left( u_i + \frac{m}{m-1} \right) \left( \frac{m-1}{m} u_i + 1 \right)^{\frac{1}{m-1}} - \frac{1}{m-1} \left( \frac{m-1}{m} u_i + 1 \right)^{\frac{m}{m-1}} \right], & m > 1.
# \end{cases}
# ```
# In particular, for $m = 2$ we have a simpler formula
# ```math
# E_2^*(u) = \left\| \frac{u}{2} + 1 \right\|_2^2
# ```
#
# We now implement $E_m^*$ for $m = 1, 2$.
E_dual(u, m::Val{1}) = sum(exp.(u))
function E_dual(u, m::Val{2})
return dot(u / 2 .+ 1, u / 2 .+ 1)
end;
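#
# For reference, the semidual objective and the primal-recovery map stated above can be transcribed
# directly from the formulas; this is only an editorial sketch for comparison (the script itself relies on
# `OptimalTransport.Dual.ot_entropic_semidual` and `getprimal_ot_entropic_semidual` below), assuming `ρ0`
# and `u` are vectors on the same grid as the Gibbs kernel `K`.
ot_entropic_semidual_sketch(ρ0, u, ε, K) = -ε * dot(ρ0, log.(ρ0 ./ (K * exp.(u ./ ε))) .- 1)
getprimal_sketch(ρ0, u, ε, K) = exp.(u ./ ε) .* (K' * (ρ0 ./ (K * exp.(u ./ ε))))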
#
# So, the dual problem we are dealing with reads
# ```math
# \min_{u} \operatorname{OT}^*_{\varepsilon}(\rho_0, u) + \tau E_m^*\left( \frac{-u}{\tau}-\psi \right),
# ```
# and we can thus set up `G_dual_fpe`, the dual objective.
#
function G_dual_fpe(u, ρ0, τ, ε, K)
return OptimalTransport.Dual.ot_entropic_semidual(ρ0, u, ε, K) +
τ * E_dual(-u / τ - Ψ, Val(1))
end;
#
# Now we set up `step` as previously, except this time we need to convert from the optimal dual variable $u^\star$ to the primal variable $\rho^\star$. In the code, this is handled by `getprimal_ot_entropic_semidual`. We use `ReverseDiff` in this problem.
#
function step(ρ0, τ, ε, K, G)
obj = u -> G(u, ρ0, τ, ε, K)
opt = optimize(
obj,
(∇, u) -> ReverseDiff.gradient!(∇, obj, u),
zeros(size(ρ0)),
LBFGS(),
Optim.Options(; iterations=250, g_tol=1e-6),
)
return OptimalTransport.Dual.getprimal_ot_entropic_semidual(
ρ0, Optim.minimizer(opt), ε, K
)
end;
#
# Now we can solve the dual problem as previously, and we note that the dual formulation is solved an order of magnitude faster than the primal formulation.
#
ρ = similar(ρ0, size(ρ0, 1), N)
ρ[:, 1] = ρ0
for i in 2:N
ρ[:, i] = step(ρ[:, i - 1], τ, ε, K, G_dual_fpe)
end
colors = range(colorant"red"; stop=colorant"blue", length=N)
plot(
support,
ρ;
title=raw"$F(\rho) = \langle \psi, \rho \rangle + \langle \rho, \log(\rho) \rangle$",
palette=colors,
legend=nothing,
)
# Setting `m = 2`, we can simulate instead the porous medium equation.
#
function G_dual_pme(u, ρ0, τ, ε, K)
return OptimalTransport.Dual.ot_entropic_semidual(ρ0, u, ε, K) +
τ * E_dual(-u / τ - Ψ, Val(2))
end
ρ = similar(ρ0, size(ρ0, 1), N)
ρ[:, 1] = ρ0
for i in 2:N
@info i
ρ[:, i] = step(ρ[:, i - 1], τ, ε, K, G_dual_pme)
end
colors = range(colorant"red"; stop=colorant"blue", length=N)
plot(
support,
ρ;
title=raw"$F(\rho) = \langle \psi, \rho \rangle + \langle \rho, \rho - 1\rangle$",
palette=colors,
legend=nothing,
)
| {"hexsha": "d6bc7cdb078010a03f55d18fa65e3e3f10e4dba1", "size": 13544, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "examples/variational/script.jl", "max_stars_repo_name": "zsteve/ot.jl", "max_stars_repo_head_hexsha": "a82e3fb19b00839ba809225e76a4ea43b49a315d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 41, "max_stars_repo_stars_event_min_datetime": "2021-05-28T14:16:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T17:38:40.000Z", "max_issues_repo_path": "examples/variational/script.jl", "max_issues_repo_name": "zsteve/ot.jl", "max_issues_repo_head_hexsha": "a82e3fb19b00839ba809225e76a4ea43b49a315d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 109, "max_issues_repo_issues_event_min_datetime": "2021-05-18T18:45:12.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-30T12:30:01.000Z", "max_forks_repo_path": "examples/variational/script.jl", "max_forks_repo_name": "zsteve/ot.jl", "max_forks_repo_head_hexsha": "a82e3fb19b00839ba809225e76a4ea43b49a315d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-30T12:11:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-20T19:11:56.000Z", "avg_line_length": 42.325, "max_line_length": 345, "alphanum_fraction": 0.6549763733, "num_tokens": 4520} |
using HTTP
using JSON
export RESTAPI
struct RESTAPI
baseurl
end
function (m::RESTAPI)(cmd)
    url = "$(m.baseurl)/$cmd"
    try
        response = HTTP.get(url)
return JSON.parse(String(response.body))
catch e
return "error for $url occurred: $e"
end
end
| {"hexsha": "692109dc3f31a4d9d9ead35eb155f30b4b74a3dc", "size": 292, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils.jl", "max_stars_repo_name": "dhonza/Loudspeakers.jl", "max_stars_repo_head_hexsha": "cae99792aaa822b4c74fe2f018e546c20784e35b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/utils.jl", "max_issues_repo_name": "dhonza/Loudspeakers.jl", "max_issues_repo_head_hexsha": "cae99792aaa822b4c74fe2f018e546c20784e35b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.jl", "max_forks_repo_name": "dhonza/Loudspeakers.jl", "max_forks_repo_head_hexsha": "cae99792aaa822b4c74fe2f018e546c20784e35b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.6, "max_line_length": 48, "alphanum_fraction": 0.6130136986, "num_tokens": 80} |
'''
Utility functions.
'''
import numpy as np
from functools import partial
def fwht(x):
"""Recursive implementation of the 1D Cooley-Tukey FFT"""
# x = np.asarray(x, dtype=float)
N = x.shape[0]
if N == 1:
return x
else:
X_even = fwht(x[0:(N//2)])
X_odd = fwht(x[(N//2):])
return np.concatenate([(X_even + X_odd),
(X_even - X_odd)])
def bin_to_dec(x):
n = len(x)
c = 2**(np.arange(n)[::-1])
    return c.dot(x).astype(int)
def dec_to_bin(x, num_bits):
assert x < 2**num_bits, "number of bits are not enough"
u = bin(x)[2:].zfill(num_bits)
u = list(u)
u = [int(i) for i in u]
return np.array(u)
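# Illustrative round trip for the two helpers above:
#   dec_to_bin(5, num_bits=4) -> array([0, 1, 0, 1]);  bin_to_dec(np.array([0, 1, 0, 1])) -> 5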
def binary_ints(m):
'''
Returns a matrix where row 'i' is dec_to_bin(i, m), for i from 0 to 2 ** m - 1.
From https://stackoverflow.com/questions/28111051/create-a-matrix-of-binary-representation-of-numbers-in-python.
'''
a = np.arange(2 ** m, dtype=int)[np.newaxis,:]
b = np.arange(m, dtype=int)[::-1,np.newaxis]
return np.array(a & 2**b > 0, dtype=int)
def base_ints(q, m):
'''
Returns a matrix where row 'i' is the base-q representation of i, for i from 0 to q ** m - 1.
Covers the functionality of binary_ints when n = 2, but binary_ints is faster for that case.
'''
get_row = lambda i: np.array([int(j) for j in np.base_repr(i, base=q).zfill(m)])
    return np.vstack([get_row(i) for i in range(q ** m)])
def polymod(p1, p2, q, m):
'''
Computes p1 modulo p2, and takes the coefficients modulo q.
'''
p1 = np.trim_zeros(p1, trim='f')
p2 = np.trim_zeros(p2, trim='f')
while len(p1) >= len(p2) and len(p1) > 0:
p1 -= p1[0] // p2[0] * np.pad(p2, (0, len(p1) - len(p2)))
p1 = np.trim_zeros(p1, trim='f')
return np.pad(np.mod(p1, q), (m + 1 - len(p1), 0))
def rref(A, b, q):
'''
Row reduction, to easily solve finite field systems.
'''
raise NotImplementedError()
def sign(x):
'''
Replacement for np.sign that matches the convention (footnote 2 on page 11).
'''
return (1 - np.sign(x)) // 2
def flip(x):
'''
Flip all bits in the binary array x.
'''
return np.bitwise_xor(x, 1) | {"hexsha": "4f2171b61df7960f20ec78f4ff5fb57756884bb8", "size": 2236, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/utils.py", "max_stars_repo_name": "aditya-sengupta/spright", "max_stars_repo_head_hexsha": "8c369c6eb33e0a677afaf039973faa11da8b458b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-07-20T23:25:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-07-20T23:25:07.000Z", "max_issues_repo_path": "src/utils.py", "max_issues_repo_name": "aditya-sengupta/spright", "max_issues_repo_head_hexsha": "8c369c6eb33e0a677afaf039973faa11da8b458b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/utils.py", "max_forks_repo_name": "aditya-sengupta/spright", "max_forks_repo_head_hexsha": "8c369c6eb33e0a677afaf039973faa11da8b458b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.4210526316, "max_line_length": 116, "alphanum_fraction": 0.5769230769, "include": true, "reason": "import numpy", "num_tokens": 689} |
import os
import numpy as np
import tensorflow as tf
import soundfile as sf
from tqdm import tqdm
import pandas as pd
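# Editorial note, inferred from the code below rather than from project documentation: this trainer drives
# a model with two outputs (`logits_2`, `logits_1`, apparently the second and first denoising stages),
# optimizes the sum of both stages' losses in `train_step`, and uses the `distributed_*` wrappers to run
# the steps under the given tf.distribute strategy and reduce the per-replica loss values.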
class Trainer():
def __init__(self, model, optimizer,loss, strategy, path_experiment, args):
self.model=model
print(self.model.summary())
self.strategy=strategy
self.optimizer=optimizer
self.path_experiment=path_experiment
self.args=args
#self.metrics=[]
with self.strategy.scope():
#loss_fn=tf.keras.losses.mean_absolute_error
loss.reduction=tf.keras.losses.Reduction.NONE
self.loss_object=loss
self.train_mae_s1=tf.keras.metrics.MeanAbsoluteError(name="train_mae_s1")
self.train_mae=tf.keras.metrics.MeanAbsoluteError(name="train_mae_s2")
self.val_mae=tf.keras.metrics.MeanAbsoluteError(name="validation_mae")
self.val_loss = tf.keras.metrics.Mean(name='test_loss')
def train_step(self,inputs):
noisy, clean= inputs
with tf.GradientTape() as tape:
logits_2,logits_1 = self.model(noisy, training=True) # Logits for this minibatch
            loss_value = tf.reduce_mean(self.loss_object(clean, logits_2)) + tf.reduce_mean(self.loss_object(clean, logits_1))
grads = tape.gradient(loss_value, self.model.trainable_weights)
self.optimizer.apply_gradients(zip(grads, self.model.trainable_weights))
self.train_mae.update_state(clean, logits_2)
self.train_mae_s1.update_state(clean, logits_1)
return loss_value
def test_step(self,inputs):
noisy,clean = inputs
predictions_s2, predictions_s1 = self.model(noisy, training=False)
t_loss = self.loss_object(clean, predictions_s2)+self.loss_object(clean, predictions_s1)
self.val_mae.update_state(clean,predictions_s2)
self.val_loss.update_state(t_loss)
@tf.function()
def distributed_training_step(self,inputs):
per_replica_losses=self.strategy.run(self.train_step, args=(inputs,))
reduced_losses=self.strategy.reduce(tf.distribute.ReduceOp.MEAN, per_replica_losses, axis=None)
return reduced_losses
@tf.function
def distributed_test_step(self,inputs):
return self.strategy.run(self.test_step, args=(inputs,))
| {"hexsha": "cdaf2225ed42997b6c0285c181a3ecb9091494ba", "size": 2378, "ext": "py", "lang": "Python", "max_stars_repo_path": "trainer.py", "max_stars_repo_name": "IgnacioIrigaray/denoising-historical-recordings", "max_stars_repo_head_hexsha": "12f3f070e6131c58fbab810e0fc33f4a2cd8fb51", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 38, "max_stars_repo_stars_event_min_datetime": "2021-09-09T13:21:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T17:39:01.000Z", "max_issues_repo_path": "trainer.py", "max_issues_repo_name": "IgnacioIrigaray/denoising-historical-recordings", "max_issues_repo_head_hexsha": "12f3f070e6131c58fbab810e0fc33f4a2cd8fb51", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2022-01-19T15:49:57.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-02T21:20:15.000Z", "max_forks_repo_path": "trainer.py", "max_forks_repo_name": "IgnacioIrigaray/denoising-historical-recordings", "max_forks_repo_head_hexsha": "12f3f070e6131c58fbab810e0fc33f4a2cd8fb51", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2021-09-10T15:10:03.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-17T18:38:49.000Z", "avg_line_length": 35.4925373134, "max_line_length": 126, "alphanum_fraction": 0.6766190076, "include": true, "reason": "import numpy", "num_tokens": 521} |
Require Import Coq.Lists.List. Import ListNotations.
Require Import
Coq.Bool.Bool
Coq.Strings.Ascii
Coq.Strings.String.
Local Open Scope string.
Local Open Scope char.
From SRC Require Export CStrings.
(* Act II - Scene 0, Before Semirings *)
Inductive regex : Type :=
| Eps : regex
| Sym : bool -> ascii -> regex
| Alt : regex -> regex -> regex
| Seq : regex -> regex -> regex
| Rep : regex -> regex.
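(* This is marked-regex matching in the style of Fischer, Huch and Wilke's pearl
   "A Play on Regular Expressions" (cf. the "Act II" header above): the boolean carried by [Sym]
   marks symbols reachable after the input consumed so far, and [shift] below advances all marks
   by one input character. (Editorial comment.) *)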
Fixpoint empty (x : regex) : bool :=
match x with
| Eps => true
| Sym _ _ => false
| Alt p q => empty p || empty q
| Seq p q => empty p && empty q
| Rep r => true
end.
Fixpoint final (x : regex) : bool :=
match x with
| Eps => false
| Sym b _ => b
| Alt p q => final p || final q
| Seq p q => final p && empty q || final q
| Rep r => final r
end.
Fixpoint shift (m : bool) (x : regex) (c : ascii)
: regex :=
match x with
| Eps => Eps
| Sym _ x =>
Sym (m && (eq_ascii x c)) x
| Alt p q =>
Alt (shift m p c) (shift m q c)
| Seq p q =>
Seq (shift m p c)
        (shift (m && empty p || final p) q c)
| Rep r =>
Rep (shift (m || final r) r c)
end.
Definition rmatch' (r : regex) (s : lstring) : bool :=
match s with
| [] => empty r
| (c :: cs) =>
final (fold_left (shift false) cs (shift true r c))
end.
Definition rmatch (r : regex) (s : string) : bool :=
rmatch' r (str_to_lstr s).
Compute shift true (Seq (Sym false "a") (Rep (Sym false "b"))) "a".
Compute shift true (Seq (Sym true "a") (Rep (Sym false "b"))) "b".
Compute rmatch (Seq (Sym true "a") (Sym false "b")) "ab".
| {"author": "mtrsk", "repo": "Regex-Play-Coq", "sha": "6f04e9299cfaa85377cf50d0470750d95abbb0b9", "save_path": "github-repos/coq/mtrsk-Regex-Play-Coq", "path": "github-repos/coq/mtrsk-Regex-Play-Coq/Regex-Play-Coq-6f04e9299cfaa85377cf50d0470750d95abbb0b9/src/EffMatching.v"} |
using DynamicalSystemsBase
function eom_ar1_unidir(x, p, n)
a₁, b₁, c_xy, σ = (p...,)
x, y = (x...,)
ξ₁ = rand(Normal(0, σ))
ξ₂ = rand(Normal(0, σ))
dx = a₁*x + ξ₁
dy = b₁*y + c_xy*x + ξ₂
return SVector{2}(dx, dy)
end
function ar1_unidir(;uᵢ = rand(2), a₁ = 0.90693, b₁ = 0.40693, c_xy = 0.5, σ = 0.40662)
p = [a₁, b₁, c_xy, σ]
DiscreteDynamicalSystem(eom_ar1_unidir, uᵢ, p)
end
vars = (1, 2)
npts, tstep = 50, 50
d_xind = Uniform(2.5, 5.5)
d_yind = Uniform(2.5, 5.5)
d_xval = Uniform(0.01, 0.2)
d_yval = Uniform(0.01, 0.2)
X, Y = example_uncertain_indexvalue_datasets(ar1_unidir(c_xy = 0.5), npts, vars, tstep = tstep,
d_xind = d_xind, d_yind = d_yind,
d_xval = d_xval, d_yval = d_yval);
time_grid = -20:100:2540
n_draws = 10000 # draws per uncertain value
n_bins = length(time_grid) - 1
wts = rand(length(X))
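# Extra sanity checks (illustrative additions, not from the original test set; they use
# only Base and Test): the bin grid is uniform, and the weights can be normalised freely.
@test all(==(step(time_grid)), diff(collect(time_grid)))
@test length(time_grid) - 1 == n_bins
wts_normalised = wts ./ sum(wts)    # hypothetical normalised weights
@test isapprox(sum(wts_normalised), 1.0)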
# Values in each bin represented as RawValues
b = BinnedWeightedResampling(RawValues, time_grid, wts, n_draws)
bc, vs = bin(Y, b);
@test vs isa Vector{Vector{T}} where T
@test length(vs) == n_bins
# Values in each bin represented as UncertainScalarKDE
b_kde = BinnedWeightedResampling(UncertainScalarKDE, time_grid, wts, n_draws)
Y_binned = bin(Y, b_kde);
@test Y_binned isa AbstractUncertainIndexValueDataset
# Values in each bin represented as UncertainScalarPopulation
b_pop = BinnedWeightedResampling(UncertainScalarPopulation, time_grid, wts, n_draws)
Y_binned = bin(Y, b_pop);
@test Y_binned isa AbstractUncertainIndexValueDataset
| {"hexsha": "faf5917f642bdf0022ce4e7422362d0749b71dbb", "size": 1512, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/resampling/binning/test_bin_BinnedWeightedResampling.jl", "max_stars_repo_name": "JuliaTagBot/UncertainData.jl", "max_stars_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2019-01-04T10:13:08.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-03T01:11:13.000Z", "max_issues_repo_path": "test/resampling/binning/test_bin_BinnedWeightedResampling.jl", "max_issues_repo_name": "JuliaTagBot/UncertainData.jl", "max_issues_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 78, "max_issues_repo_issues_event_min_datetime": "2018-12-17T20:12:44.000Z", "max_issues_repo_issues_event_max_datetime": "2021-11-19T20:46:04.000Z", "max_forks_repo_path": "test/resampling/binning/test_bin_BinnedWeightedResampling.jl", "max_forks_repo_name": "JuliaTagBot/UncertainData.jl", "max_forks_repo_head_hexsha": "4d9dc513b97f04a1d761e0a94eab3e3b11cc4c8a", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-01-22T23:05:29.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-19T12:21:02.000Z", "avg_line_length": 28.0, "max_line_length": 95, "alphanum_fraction": 0.6951058201, "num_tokens": 572} |
#! format: off
abstract type AbstractRegulationFormulation <: AbstractDeviceFormulation end
struct ReserveLimitedRegulation <: AbstractRegulationFormulation end
struct DeviceLimitedRegulation <: AbstractRegulationFormulation end
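# A formulation is attached to a component type through a DeviceModel, e.g. (illustrative,
# not part of this file): DeviceModel(PSY.RegulationDevice{PSY.ThermalStandard}, DeviceLimitedRegulation)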
get_variable_sign(_, ::Type{PSY.RegulationDevice{PSY.ThermalStandard}}, ::DeviceLimitedRegulation) = NaN
############################ DeltaActivePowerUpVariable, RegulationDevice ###########################
get_variable_binary(::DeltaActivePowerUpVariable, ::Type{<:PSY.RegulationDevice}, ::AbstractRegulationFormulation) = false
get_variable_lower_bound(::DeltaActivePowerUpVariable, ::PSY.RegulationDevice, ::AbstractRegulationFormulation) = 0.0
############################ DeltaActivePowerDownVariable, RegulationDevice ###########################
get_variable_binary(::DeltaActivePowerDownVariable, ::Type{<:PSY.RegulationDevice}, ::AbstractRegulationFormulation) = false
get_variable_lower_bound(::DeltaActivePowerDownVariable, ::PSY.RegulationDevice, ::AbstractRegulationFormulation) = 0.0
############################ AdditionalDeltaActivePowerUpVariable, RegulationDevice ###########################
get_variable_binary(::AdditionalDeltaActivePowerUpVariable, ::Type{<:PSY.RegulationDevice}, ::AbstractRegulationFormulation) = false
get_variable_lower_bound(::AdditionalDeltaActivePowerUpVariable, ::PSY.RegulationDevice, ::AbstractRegulationFormulation) = 0.0
############################ AdditionalDeltaActivePowerDownVariable, RegulationDevice ###########################
get_variable_binary(::AdditionalDeltaActivePowerDownVariable, ::Type{<:PSY.RegulationDevice}, ::AbstractRegulationFormulation) = false
get_variable_lower_bound(::AdditionalDeltaActivePowerDownVariable, ::PSY.RegulationDevice, ::AbstractRegulationFormulation) = 0.0
#! format: on
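# Upper bound on upward regulation under DeviceLimitedRegulation: each device can move up
# at most by the headroom between its maximum active power and its (time-series scaled) base point.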
function add_constraints!(
optimization_container::OptimizationContainer,
::Type{RangeConstraint},
::Type{DeltaActivePowerUpVariable},
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, DeviceLimitedRegulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.StaticInjection}
parameters = model_has_parameters(optimization_container)
var_name_up = make_variable_name(DeltaActivePowerUpVariable, T)
var_up = get_variable(optimization_container, var_name_up)
names = [PSY.get_name(g) for g in devices]
time_steps = model_time_steps(optimization_container)
up = Symbol("regulation_limits_up_$(T)")
container_up = add_cons_container!(optimization_container, up, names, time_steps)
constraint_infos = Vector{DeviceTimeSeriesConstraintInfo}(undef, length(devices))
for (ix, d) in enumerate(devices)
ts_vector = get_time_series(optimization_container, d, "max_active_power")
constraint_info = DeviceTimeSeriesConstraintInfo(
d,
x -> PSY.get_max_active_power(x),
ts_vector,
x -> PSY.get_active_power_limits(x),
)
constraint_infos[ix] = constraint_info
end
if parameters
base_points_param = get_parameter_container(
optimization_container,
make_variable_name(ACTIVE_POWER, T),
)
multiplier = get_multiplier_array(base_points_param)
base_points = get_parameter_array(base_points_param)
end
for d in constraint_infos
name = get_component_name(d)
limits = get_limits(d)
for t in time_steps
rating = parameters ? multiplier[name, t] : d.multiplier
base_point = parameters ? base_points[name, t] : get_timeseries(d)[t]
container_up[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
var_up[name, t] <= limits.max - base_point * rating
)
end
end
return
end
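# Downward counterpart: regulation down is limited by the margin between the device's
# base point and its minimum active power.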
function add_constraints!(
optimization_container::OptimizationContainer,
::Type{RangeConstraint},
::Type{DeltaActivePowerDownVariable},
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, DeviceLimitedRegulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.StaticInjection}
parameters = model_has_parameters(optimization_container)
var_name_dn = make_variable_name(DeltaActivePowerDownVariable, T)
var_dn = get_variable(optimization_container, var_name_dn)
names = [PSY.get_name(g) for g in devices]
time_steps = model_time_steps(optimization_container)
dn = Symbol("regulation_limits_dn_$(T)")
container_dn = add_cons_container!(optimization_container, dn, names, time_steps)
constraint_infos = Vector{DeviceTimeSeriesConstraintInfo}(undef, length(devices))
for (ix, d) in enumerate(devices)
ts_vector = get_time_series(optimization_container, d, "max_active_power")
constraint_info = DeviceTimeSeriesConstraintInfo(
d,
x -> PSY.get_max_active_power(x),
ts_vector,
x -> PSY.get_active_power_limits(x),
)
constraint_infos[ix] = constraint_info
end
if parameters
base_points_param = get_parameter_container(
optimization_container,
make_variable_name(ACTIVE_POWER, T),
)
multiplier = get_multiplier_array(base_points_param)
base_points = get_parameter_array(base_points_param)
end
for d in constraint_infos
name = get_component_name(d)
limits = get_limits(d)
for t in time_steps
rating = parameters ? multiplier[name, t] : d.multiplier
base_point = parameters ? base_points[name, t] : get_timeseries(d)[t]
container_dn[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
var_dn[name, t] <= base_point * rating - limits.min
)
end
end
return
end
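# ReserveLimitedRegulation: upward regulation is capped directly by the device's upward
# reserve limit, independent of its dispatch base point.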
function add_constraints!(
optimization_container::OptimizationContainer,
::Type{RangeConstraint},
::Type{DeltaActivePowerUpVariable},
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, ReserveLimitedRegulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.StaticInjection}
var_name_up = make_variable_name(DeltaActivePowerUpVariable, T)
var_up = get_variable(optimization_container, var_name_up)
names = [PSY.get_name(g) for g in devices]
time_steps = model_time_steps(optimization_container)
up = Symbol("regulation_limits_up_$(T)")
container_up = add_cons_container!(optimization_container, up, names, time_steps)
for d in devices
name = PSY.get_name(d)
limit_up = PSY.get_reserve_limit_up(d)
for t in time_steps
container_up[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
var_up[name, t] <= limit_up
)
end
end
return
end
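# ReserveLimitedRegulation: downward regulation is capped by the device's downward
# reserve limit.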
function add_constraints!(
optimization_container::OptimizationContainer,
::Type{RangeConstraint},
::Type{DeltaActivePowerDownVariable},
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, ReserveLimitedRegulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.StaticInjection}
var_name_dn = make_variable_name(DeltaActivePowerDownVariable, T)
var_dn = get_variable(optimization_container, var_name_dn)
names = [PSY.get_name(g) for g in devices]
time_steps = model_time_steps(optimization_container)
dn = Symbol("regulation_limits_dn_$(T)")
container_dn = add_cons_container!(optimization_container, dn, names, time_steps)
for d in devices
name = PSY.get_name(d)
        limit_dn = PSY.get_reserve_limit_dn(d)
        for t in time_steps
            container_dn[name, t] = JuMP.@constraint(
                optimization_container.JuMPmodel,
                var_dn[name, t] <= limit_dn
)
end
end
return
end
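# Per-step ramp limits for thermal devices under DeviceLimitedRegulation: regulation
# deployment in each direction cannot exceed the device ramp rate over one time step.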
function ramp_constraints!(
optimization_container::OptimizationContainer,
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, DeviceLimitedRegulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.ThermalStandard}
R_up = get_variable(optimization_container, DeltaActivePowerUpVariable, T)
R_dn = get_variable(optimization_container, DeltaActivePowerDownVariable, T)
resolution = Dates.value(Dates.Second(model_resolution(optimization_container)))
names = [PSY.get_name(g) for g in devices]
time_steps = model_time_steps(optimization_container)
container_up =
add_cons_container!(optimization_container, :ramp_limits_up, names, time_steps)
container_dn =
add_cons_container!(optimization_container, :ramp_limits_dn, names, time_steps)
for d in devices
ramp_limits = PSY.get_ramp_limits(d)
ramp_limits === nothing && continue
        # ramp limits are assumed to be given per minute; convert the per-second
        # resolution into minutes to get the maximum change over one time step
        scaling_factor = resolution / SECONDS_IN_MINUTE
name = PSY.get_name(d)
for t in time_steps
container_up[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
R_up[name, t] <= ramp_limits.up * scaling_factor
)
container_dn[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
R_dn[name, t] <= ramp_limits.down * scaling_factor
)
end
end
return
end
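# Ties device-level regulation to the area requirement: each device provides its
# participation-factor share of the area reserve plus an emergency adjustment, and the
# emergency terms are netted out of the area-level emergency expressions.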
function participation_assignment!(
optimization_container::OptimizationContainer,
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, <:AbstractRegulationFormulation},
::Type{AreaBalancePowerModel},
::Nothing,
) where {T <: PSY.StaticInjection}
time_steps = model_time_steps(optimization_container)
R_up = get_variable(optimization_container, DeltaActivePowerUpVariable, T)
R_dn = get_variable(optimization_container, DeltaActivePowerDownVariable, T)
    R_up_emergency =
        get_variable(optimization_container, AdditionalDeltaActivePowerUpVariable, T)
    R_dn_emergency =
        get_variable(optimization_container, AdditionalDeltaActivePowerDownVariable, T)
area_reserve_up =
get_variable(optimization_container, DeltaActivePowerUpVariable, PSY.Area)
area_reserve_dn =
get_variable(optimization_container, DeltaActivePowerDownVariable, PSY.Area)
component_names = [PSY.get_name(d) for d in devices]
participation_assignment_up = JuMPConstraintArray(undef, component_names, time_steps)
participation_assignment_dn = JuMPConstraintArray(undef, component_names, time_steps)
assign_constraint!(
optimization_container,
"participation_assignment_up",
participation_assignment_up,
)
assign_constraint!(
optimization_container,
"participation_assignment_dn",
participation_assignment_dn,
)
expr_up = get_expression(optimization_container, :emergency_up)
expr_dn = get_expression(optimization_container, :emergency_dn)
for d in devices
name = PSY.get_name(d)
services = PSY.get_services(d)
if length(services) > 1
            device_agc = first(a for a in services if isa(a, PSY.AGC))
            area_name = PSY.get_name(PSY.get_area(device_agc))
else
device_agc = first(services)
area_name = PSY.get_name(PSY.get_area(device_agc))
end
p_factor = PSY.get_participation_factor(d)
for t in time_steps
participation_assignment_up[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
R_up[name, t] ==
(p_factor.up * area_reserve_up[area_name, t]) + R_up_emergency[name, t]
)
participation_assignment_dn[name, t] = JuMP.@constraint(
optimization_container.JuMPmodel,
R_dn[name, t] ==
(p_factor.dn * area_reserve_dn[area_name, t]) + R_dn_emergency[name, t]
)
JuMP.add_to_expression!(expr_up[area_name, t], -1 * R_up_emergency[name, t])
JuMP.add_to_expression!(expr_dn[area_name, t], -1 * R_dn_emergency[name, t])
end
end
return
end
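# Cost of emergency regulation deployment: penalized inversely to the participation
# factor (devices with a negligible factor are charged the services slack cost).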
function regulation_cost!(
optimization_container::OptimizationContainer,
devices::IS.FlattenIteratorWrapper{PSY.RegulationDevice{T}},
::DeviceModel{PSY.RegulationDevice{T}, <:AbstractRegulationFormulation},
) where {T <: PSY.StaticInjection}
time_steps = model_time_steps(optimization_container)
R_up = get_variable(optimization_container, DeltaActivePowerUpVariable, T)
R_dn = get_variable(optimization_container, DeltaActivePowerDownVariable, T)
    R_up_emergency =
        get_variable(optimization_container, AdditionalDeltaActivePowerUpVariable, T)
    R_dn_emergency =
        get_variable(optimization_container, AdditionalDeltaActivePowerDownVariable, T)
for d in devices
cost = PSY.get_cost(d)
p_factor = PSY.get_participation_factor(d)
up_cost =
isapprox(p_factor.up, 0.0; atol = 1e-2) ? SERVICES_SLACK_COST : 1 / p_factor.up
dn_cost =
isapprox(p_factor.dn, 0.0; atol = 1e-2) ? SERVICES_SLACK_COST : 1 / p_factor.dn
for t in time_steps
JuMP.add_to_expression!(
optimization_container.cost_function,
R_up_emergency[PSY.get_name(d), t],
up_cost,
)
JuMP.add_to_expression!(
optimization_container.cost_function,
R_dn_emergency[PSY.get_name(d), t],
dn_cost,
)
end
end
return
end
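# Nodal/area injection expression for regulation devices: built from the max_active_power
# time series when forecasts are used, otherwise from the static active power value.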
function NodalExpressionSpec(
::Type{<:PSY.RegulationDevice{T}},
::Type{AreaBalancePowerModel},
use_forecasts::Bool,
) where {T <: PSY.StaticInjection}
return NodalExpressionSpec(
"max_active_power",
make_variable_name(ActivePowerVariable, T),
use_forecasts ? x -> PSY.get_max_active_power(x) : x -> PSY.get_active_power(x),
1.0,
JuMP.VariableRef,
)
end
| {"hexsha": "4105c5d7f3ca37474ddc7d0408aaca4d3d508f35", "size": 14206, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/devices_models/devices/regulation_device.jl", "max_stars_repo_name": "sambuddhac/PowerSimulations.jl", "max_stars_repo_head_hexsha": "5e485fab40dcd16d1cb4ed497b07373171ee0aa7", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/devices_models/devices/regulation_device.jl", "max_issues_repo_name": "sambuddhac/PowerSimulations.jl", "max_issues_repo_head_hexsha": "5e485fab40dcd16d1cb4ed497b07373171ee0aa7", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/devices_models/devices/regulation_device.jl", "max_forks_repo_name": "sambuddhac/PowerSimulations.jl", "max_forks_repo_head_hexsha": "5e485fab40dcd16d1cb4ed497b07373171ee0aa7", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.1299435028, "max_line_length": 134, "alphanum_fraction": 0.6904828946, "num_tokens": 3234} |