[STATEMENT]
lemma stoch_proc_filt_eq_sets:
  assumes "space M = space N"
  shows "sets (stoch_proc_filt M X P n) = sets (stoch_proc_filt N X P n)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. sets (stoch_proc_filt M X P n) = sets (stoch_proc_filt N X P n)
[PROOF STEP]
unfolding stoch_proc_filt_def
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. sets (gen_subalgebra M (sigma_sets (space M) (\<Union>i\<in>{m. m \<le> n}. {X i -` A \<inter> space M |A. A \<in> sets P}))) =
    sets (gen_subalgebra N (sigma_sets (space N) (\<Union>i\<in>{m. m \<le> n}. {X i -` A \<inter> space N |A. A \<in> sets P})))
[PROOF STEP]
proof (rule gen_subalgebra_eq_space_sets, (simp add: assms)+)
[PROOF STATE]
proof (state)
goal (1 subgoal):
 1. sigma_sets (space N) (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
[PROOF STEP]
show "sigma_sets (space N) (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. sigma_sets (space N) (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
[PROOF STEP]
proof (rule sigma_algebra.sigma_sets_subset)
[PROOF STATE]
proof (state)
goal (2 subgoals):
 1. sigma_algebra (space N) (Pow (space N))
 2. (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
[PROOF STEP]
show "sigma_algebra (space N) (Pow (space N))"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. sigma_algebra (space N) (Pow (space N))
[PROOF STEP]
by (simp add: sigma_algebra_Pow)
[PROOF STATE]
proof (state)
this:
  sigma_algebra (space N) (Pow (space N))
goal (1 subgoal):
 1. (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
[PROOF STEP]
show "(\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)"
[PROOF STATE]
proof (prove)
goal (1 subgoal):
 1. (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
[PROOF STEP]
by auto
[PROOF STATE]
proof (state)
this:
  (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
goal:
No subgoals!
[PROOF STEP]
qed
[PROOF STATE]
proof (state)
this:
  sigma_sets (space N) (\<Union>x\<in>{m. m \<le> n}. {X x -` A \<inter> space N |A. A \<in> sets P}) \<subseteq> Pow (space N)
goal:
No subgoals!
[PROOF STEP]
qed
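Read informally, stoch_proc_filt_eq_sets states that the filtration generated by the process X up to time n depends on the underlying measure space only through its carrier set. The restatement below in conventional measure-theoretic notation is a sketch: the symbol \mathcal{F}^{X}_{n} is an informal paraphrase of stoch_proc_filt (which additionally wraps the generated \sigma-algebra via gen_subalgebra) and is not a name used in the formalization.

% Informal restatement (sketch), writing F^X_n(M) for stoch_proc_filt M X P n:
\[
  \operatorname{space}(M) = \operatorname{space}(N)
  \;\Longrightarrow\;
  \operatorname{sets}\bigl(\mathcal{F}^{X}_{n}(M)\bigr) = \operatorname{sets}\bigl(\mathcal{F}^{X}_{n}(N)\bigr),
  \qquad
  \mathcal{F}^{X}_{n}(M) \;=\; \sigma\Bigl(\bigcup_{i \le n} \bigl\{\, X_i^{-1}(A) \cap \operatorname{space}(M) \;:\; A \in \operatorname{sets}(P) \,\bigr\}\Bigr),
\]

which holds because, once the two spaces coincide, the generating family, and hence the generated \sigma-algebra, is the same for M and N.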
{"llama_tokens": 1096, "file": "DiscretePricing_Infinite_Coin_Toss_Space", "length": 10}
# TODO add test cases for half, single, and longdouble functions.
# TODO add test cases for equivalent dtypes: bool_, bytes_, str_, int0, and uint0.
# TODO add test cases for mask operations: mask_or, mask_and, mask_xor, mask_andnot, mask_ori, mask_andi, mask_xori, and mask_andnoti.
# TODO fold all int, uint and float equivalence tests into one method that uses pytest parameters
import builtins
import operator
import random

import numpy as np
import riptable as rt
import pytest
import hypothesis
from typing import List
from riptable import FastArray, FA
from numpy.testing import assert_allclose
from hypothesis import assume, event, example, given, HealthCheck
from hypothesis.extra.numpy import (
    arrays,
    basic_indices,
    boolean_dtypes,
    floating_dtypes,
    integer_dtypes,
)
from hypothesis.strategies import (
    integers,
    booleans,
    shared,
    data,
    lists,
    floats,
    sampled_from,
    slices,
    text,
    just,
)

# riptable custom Hypothesis strategies
from .strategies.helper_strategies import (
    _MAX_FLOAT,
    _MAX_INT,
    _MAX_VALUE,
    floating_scalar,
    generate_array_and_axis,
    generate_array_and_where,
    generate_array_axis_and_ddof,
    generate_array_axis_and_repeats_array,
    generate_reshape_array_and_shape_strategy,
    generate_sample_test_floats,
    generate_sample_test_integers,
    generate_tuples_of_arrays,
    interpolation_data,
    ints_floats_complex_or_booleans,
    ints_floats_datetimes_and_timedeltas,
    ints_floats_or_complex_dtypes,
    ints_or_floats_dtypes,
    ints_or_floats_example,
    ndarray_shape_strategy,
    one_darray_shape_strategy,
    same_structure_ndarrays,
    start_stop_step_strategy,
    _MAX_SHAPE_SIZE,
)
from riptable.Utils.rt_testing import (
    assert_array_equal_,
    assert_equal_,
    _NDARRAY_FASTARRAY_COMMON_AND_DIFF,
)
from riptable.Utils.teamcity_helper import is_running_in_teamcity

"""
Commonalities between riptable and numpy

>> import numpy as np
>> import riptable as rt
>> sorted(list(set(dir(rt)).intersection(set(dir(np)))))
>> ['__builtins__', '__cached__', '__doc__', '__file__', '__loader__', '__name__', '__package__', '__path__', '__spec__', '__version__', 'abs', 'absolute', 'all', 'any', 'arange', 'argsort', 'bincount', 'ceil', 'concatenate', 'cumsum', 'diff', 'double', 'empty', 'empty_like', 'float32', 'float64', 'floor', 'full', 'hstack', 'int16', 'int32', 'int64', 'int8', 'interp', 'isfinite', 'isinf', 'isnan', 'lexsort', 'log', 'log10', 'max', 'maximum', 'mean', 'median', 'min', 'minimum', 'nan', 'nan_to_num', 'nanmax', 'nanmean', 'nanmedian', 'nanmin', 'nanpercentile', 'nanstd', 'nansum', 'nanvar', 'ones', 'ones_like', 'percentile', 'putmask', 'quantile', 'repeat', 'reshape', 'round', 'searchsorted', 'single', 'size', 'sort', 'std', 'sum', 'tile', 'transpose', 'trunc', 'uint16', 'uint32', 'uint64', 'uint8', 'unique', 'var', 'vstack', 'where', 'zeros', 'zeros_like']
"""


class TestRiptableNumpyEquivalency:
    @pytest.mark.xfail(
        reason="https://jira/browse/SOQTEST-6479 abs calls np.abs instead of rt.absolute"
    )
    @pytest.mark.skipif(
        is_running_in_teamcity(), reason="Please remove alongside xfail removal."
    )
    @given(
        arr_and_where=generate_array_and_where(
            shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes()
        )
    )
    def test_abs(self, arr_and_where):
        arr, where_arr = arr_and_where

        # Test #1: Returns new array containing results; no optional parameters provided.
        rt_abs = rt.abs(arr)
        np_abs = np.abs(arr)
        assert isinstance(rt_abs, FastArray)
        assert_array_equal_(np.array(rt_abs), np_abs)

        # Test #2: Returns new array containing results; 'where' bitmask is provided.
        rt_abs = rt.abs(arr, where=where_arr)
        np_abs = np.abs(arr, where=where_arr)
        assert isinstance(rt_abs, FastArray)
        assert_array_equal_(np.array(rt_abs), np_abs)

        # Test #3: Results written to array specified in 'out' parameter; no other optional params.
        rt_abs = np.zeros_like(arr)
        np_abs = np.zeros_like(arr)
        rt_output = rt.abs(arr, out=rt_abs)
        np_output = np.abs(arr, out=np_abs)
        assert_array_equal_(np.array(rt_abs), np_abs)
        # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified?
        # assert isinstance(rt_output, FastArray, msg="riptable.abs() did not return a FastArray")

        # Test #4: Results written to array specified in 'out' parameter; 'where' bitmask is provided.
        rt_abs = np.zeros_like(arr)
        np_abs = np.zeros_like(arr)
        rt_output = rt.abs(arr, where=where_arr, out=rt_abs)
        np_output = np.abs(arr, where=where_arr, out=np_abs)
        assert_array_equal_(np.array(rt_abs), np_abs)
        # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified?
        # assert isinstance(rt_output, FastArray, msg="riptable.abs() did not return a FastArray")

    @pytest.mark.xfail(
        reason="https://jira/browse/RIP-357 discrepancy between rt.absolute and np.absolute"
    )
    @pytest.mark.skipif(
        is_running_in_teamcity(), reason="Please remove alongside xfail removal."
    )
    @given(
        arr_and_where=generate_array_and_where(
            shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes()
        )
    )
    def test_absolute(self, arr_and_where):
        arr, where_arr = arr_and_where

        # Test #1: Returns new array containing results; no optional parameters provided.
        rt_output = rt.absolute(arr)
        np_output = np.absolute(arr)
        assert isinstance(rt_output, FastArray)
        assert_array_equal_(np.array(rt_output), np_output)

        # Test #2: Returns new array containing results; 'where' bitmask is provided.
        rt_output = rt.absolute(arr, where=where_arr)
        np_output = np.absolute(arr, where=where_arr)
        assert isinstance(rt_output, FastArray)
        assert_array_equal_(np.array(rt_output), np_output)

        # Test #3: Results written to array specified in 'out' parameter; no other optional params.
        rt_inplace = np.zeros_like(arr)
        np_inplace = np.zeros_like(arr)
        rt_output = rt.absolute(arr, out=rt_inplace)
        np_output = np.absolute(arr, out=np_inplace)
        assert_array_equal_(np.array(rt_inplace), np_inplace)
        # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified?
        # assert isinstance(rt_output, FastArray, msg="riptable.absolute() did not return a FastArray")

        # Test #4: Results written to array specified in 'out' parameter; 'where' bitmask is provided.
        rt_inplace = np.zeros_like(arr)
        np_inplace = np.zeros_like(arr)
        rt_output = rt.absolute(arr, where=where_arr, out=rt_inplace)
        np_output = np.absolute(arr, where=where_arr, out=np_inplace)
        assert_array_equal_(np.array(rt_inplace), np_inplace)
        # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified?
# assert isinstance(rt_output, FastArray, msg="riptable.absolute() did not return a FastArray") @given( arr_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_complex_or_booleans() ) ) def test_all(self, arr_and_axis): arr = arr_and_axis[0] axis = arr_and_axis[1] rt_all = rt.all(arr, axis=axis) np_all = np.all(arr, axis=axis) if axis is None: # If axis is None, all is performed on entire matrix, so it should have the same result as the builtin all() built_in_all = builtins.all(arr.flatten()) assert rt_all == built_in_all assert rt_all == np_all else: # if performing all() over a certain axis, it will return an array/matrix assert_array_equal_(np.array(rt_all), np_all) @given( arr_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_complex_or_booleans() ) ) def test_any(self, arr_and_axis): arr = arr_and_axis[0] axis = arr_and_axis[1] rt_any = rt.any(arr, axis=axis) np_any = np.any(arr, axis=axis) if axis is None: # If axis is None, any is performed on entire matrix, so it should have the same result as the builtin any() built_in_any = builtins.any(arr.flatten()) assert rt_any == built_in_any assert rt_any == np_any else: # if performing any() over a certain axis, it will return an array/matrix assert_array_equal_(np.array(rt_any), np_any) @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow]) @example( start_stop_step={ "start": (-1000000 - 1000000j), "stop": (1000000 + 1000000j), "step": (0.0000001 + 0.0000001j), } ) @given(start_stop_step_strategy()) def test_arange(self, start_stop_step): start = start_stop_step["start"] stop = start_stop_step["stop"] step = start_stop_step["step"] # This block is instead of the assume(length < MAX_VALUE) so the example can be used to test large # numbers and complex numbers. min_step = abs((stop - start) / _MAX_VALUE) if min_step > abs(step): step = (step / abs(step)) * min_step # Compare results of arange and exception message, if one is thrown. rt_error_msg = "" np_error_msg = "" try: rt_arange = rt.arange(start, stop, step) except Exception as e: rt_arange = None rt_error_msg = str(e) try: np_arange = np.arange(start, stop, step) except Exception as e: np_arange = None np_error_msg = str(e) if rt_error_msg and np_error_msg: assert rt_error_msg == np_error_msg else: assert_array_equal_(np.array(rt_arange), np_arange) assert isinstance(rt_arange, FastArray) # TODO: Delete this test once SOQTEST-6495 is resolved. This way arange's main functionality can still be tested @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6495 kwargs not implemented in riptable.arange()" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given(start_stop_step_strategy()) def test_arange_kwargs(self, start_stop_step): start = start_stop_step["start"] stop = start_stop_step["stop"] step = start_stop_step["step"] # limit step size so lists don't get too long and use up too many resources # These can be smaller than in test_arange, because this one is just confirming the kwargs work max_length = 1000 length = abs((stop - start) / step) # TODO directly draw from this range, instead of using hypothesis assume assume(-1 <= length < max_length) rt_error_msg = "" np_error_msg = "" try: rt_arange = rt.arange(start=start, stop=stop, step=step) except Exception as e: rt_arange = None rt_error_msg = str(e) try: np_arange = np.arange(start=start, stop=stop, step=step) except Exception as e: np_arange = None np_error_msg = str(e) if rt_error_msg and np_error_msg: assert rt_error_msg == np_error_msg else: assert_array_equal_(np.array(rt_arange), np_arange) assert isinstance(rt_arange, FastArray) @given( arrays( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ) ) @pytest.mark.skip(reason="Skip if riptable defers to numpy argsort implementation.") def test_argsort(self, arr): axis_list = [None] axis_list.extend(list(range(-1, len(arr.shape)))) sort_algs = ["quicksort", "mergesort", "heapsort", "stable"] for axis in axis_list: for sort_alg in sort_algs: rt_argsort = rt.argsort(arr, axis=axis, kind=sort_alg) np_argsort = np.argsort(arr, axis=axis, kind=sort_alg) assert isinstance(rt_argsort, FastArray) assert_array_equal_(np.array(rt_argsort), np_argsort) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6497 riptable.bincount returns numpy array instead of FastArray" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays(dtype=integer_dtypes(), shape=ndarray_shape_strategy(max_rank=1)), use_weights=booleans(), min_length=integers(), data=data(), ) def test_bincount(self, data, arr, use_weights, min_length): weights = None if use_weights: weights = data.draw(arrays(dtype=integer_dtypes(), shape=arr.shape)) rt_err = np_err = rt_bincount = np_bincount = None try: rt_bincount = rt.bincount(arr, weights=weights, minlength=min_length) except Exception as e: rt_err = str(e) try: np_bincount = np.bincount(arr, weights=weights, minlength=min_length) except Exception as e: np_err = str(e) # TODO re-visit this implementation since if-clause will evaluate when there are no exception if rt_err is None or np_err is None: assert rt_err == np_err else: assert_array_equal_(np.array(rt_bincount), np_bincount) assert isinstance(rt_bincount, FastArray) @pytest.mark.xfail( reason="Related to https://jira/browse/SOQTEST-6478 different endiannesses not handled" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( data=data(), arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(sizes=(32, 64)) ), ) def test_ceil_array(self, data, arr): # TODO: Modify this to use the 'generate_array_and_where' strategy instead? where_arr = data.draw(arrays(shape=arr.shape, dtype=boolean_dtypes())) # Test #1: Returns new array containing results; no optional parameters provided. rt_output = rt.ceil(arr) np_output = np.ceil(arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #2: Returns new array containing results; 'where' bitmask is provided. 
rt_output = rt.ceil(arr, where=where_arr) np_output = np.ceil(arr, where=where_arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #3: Results written to array specified in 'out' parameter; no other optional params. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.ceil(arr, out=rt_inplace) np_output = np.ceil(arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? # assert isinstance(rt_output, FastArray, msg="riptable.ceil() did not return a FastArray") # Test #4: Results written to array specified in 'out' parameter; 'where' bitmask is provided. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.ceil(arr, where=where_arr, out=rt_inplace) np_output = np.ceil(arr, where=where_arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? # assert isinstance(rt_output, FastArray, msg="riptable.ceil() did not return a FastArray") @given(scalar=floating_scalar()) def test_ceil_scalar(self, scalar): # test scalars rt_ceil_scalar = rt.ceil(scalar) np_ceil_scalar = np.ceil(scalar) assert_equal_(rt_ceil_scalar, np_ceil_scalar) # TODO fold the concatenate tests by using pytest params. # 20200303 N.B, rt.concatenate defers to np.concatenate. @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow]) @example(tuple_of_arrays=tuple()) @given(tuple_of_arrays=generate_tuples_of_arrays(all_same_width=True)) def test_concatenate_first_axis(self, tuple_of_arrays): rt_err_type = np_err_type = rt_concatenate = np_concatenate = None # Capture the output if evaluation succeeds, otherwise capture the error type. try: rt_concatenate = rt.concatenate(tuple_of_arrays, axis=0) except Exception as e: rt_err_type = type(e) try: np_concatenate = np.concatenate(tuple_of_arrays, axis=0) except Exception as e: np_err_type = type(e) # The concatenated arrays should be equal to a tolerance and if any exceptions were raised # they should be the same type. if rt_err_type and np_err_type: assert rt_err_type == np_err_type else: assert isinstance(rt_concatenate, FastArray) assert_array_equal_(np.array(rt_concatenate), np_concatenate) @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow]) @example(tuple_of_arrays=tuple()) @given(tuple_of_arrays=generate_tuples_of_arrays(all_same_width=True)) def test_concatenate_flatten(self, tuple_of_arrays): rt_err_type = np_err_type = rt_concatenate = np_concatenate = None # Capture the output if evaluation succeeds, otherwise capture the error type. try: rt_concatenate = rt.concatenate(tuple_of_arrays, axis=None) except Exception as e: rt_err_type = type(e) try: np_concatenate = np.concatenate(tuple_of_arrays, axis=None) except Exception as e: np_err_type = type(e) # The concatenated arrays should be equal to a tolerance and if any exceptions were raised # they should be the same type. if rt_err_type and np_err_type: assert rt_err_type == np_err_type, f"Exceptions should be the same type." 
else: assert isinstance(rt_concatenate, FastArray) assert_array_equal_(np.array(rt_concatenate), np_concatenate) @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow]) @example(tuple_of_arrays=tuple()) @given(tuple_of_arrays=generate_tuples_of_arrays(all_same_height=True)) def test_concatenate_second_axis(self, tuple_of_arrays): rt_err_type = np_err_type = rt_concatenate = np_concatenate = None # Capture the output if evaluation succeeds, otherwise capture the error type. try: rt_concatenate = rt.concatenate(tuple_of_arrays, axis=1) except ValueError as e: rt_err_type = ValueError try: np_concatenate = np.concatenate(tuple_of_arrays, axis=1) except ValueError as e: np_err_type = ValueError # The concatenated arrays should be equal to a tolerance and if any exceptions were raised # they should be the same type. if rt_err_type and np_err_type: assert rt_err_type == np_err_type else: assert isinstance(rt_concatenate, FastArray) assert_array_equal_(np.array(rt_concatenate), np_concatenate) @given( array_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_or_complex_dtypes() ), output_dtype=ints_floats_or_complex_dtypes(), ) def test_cumsum(self, array_and_axis, output_dtype): arr, axis = array_and_axis output_dtype = output_dtype rt_cumsum = rt.cumsum(arr, axis=axis) # 20200303 defers to numpy np_cumsum = np.cumsum(arr, axis=axis) assert isinstance(rt_cumsum, FastArray) assert_array_equal_(np.array(rt_cumsum), np_cumsum) rt_cumsum = rt.cumsum(arr, axis=axis, dtype=output_dtype) np_cumsum = np.cumsum(arr, axis=axis, dtype=output_dtype) assert isinstance(rt_cumsum, FastArray) assert ( rt_cumsum.dtype == output_dtype ), f"Dtype should be the same as input array." assert_array_equal_(np.array(rt_cumsum), np_cumsum) @given( array_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_or_complex_dtypes(), # If not specified, np.diff uses -1 as the default axis. default_axis=-1 ), # 'diff_iters': the number of differencing iterations the 'diff' function will perform. # This is kind of like the "divided differences" algorithm in that it's recursive, but # there's no division step. Specifying a large number of iterations for this is prohibitively # expensive and will cause the test to time out; we can test with a small-ish number of # iterations and still have good confidence we're covering all the important code paths. diff_iters=integers(min_value=0, max_value=8) ) def test_diff(self, array_and_axis, diff_iters: int): arr, axis = array_and_axis # Record some events so when hypothesis reports test statistics we'll be able # to see roughly which parts of the search space were covered. event(f'dtype = {np.dtype(arr.dtype).name}') event(f'ndim = {len(arr.shape)}') # If we'll have to clamp the number of differences below for any of the array's axes, # record that as an event too -- so we'll know if we're running into that too often. if min(arr.shape) <= diff_iters: event("Clamped diff_iters for at least one axis of the array.") # We've drawn a random integer to use for the number of differencing steps to perform. # If the length of the current array axis is smaller, clamp it here to avoid an error. num_diffs = min(arr.shape[axis], diff_iters) # TODO parameterize over kwargs; but don't loop here, it'll be too slow. # Draw the kwargs values (or None) from hypothesis strategies and let it # decide how to search through the parameter space. rt_diff = rt.diff(arr, axis=axis, n=num_diffs) # as of 20200303 defers to numpy (i.e. 
no riptable-specific implementation) np_diff = np.diff(arr, axis=axis, n=num_diffs) assert isinstance(rt_diff, FastArray) assert_array_equal_(np.array(rt_diff), np_diff) @given(examples=generate_sample_test_floats()) def test_double(self, examples): for i in examples: rt_double = rt.double(i) np_double = np.double(i) assert_equal_(rt_double, np_double) assert isinstance(rt_double, type(np_double)) rt_plus = rt_double + 1 np_plus = np_double + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_double - 1 np_minus = np_double - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) rt_square = rt_double ** 2 np_square = np_double ** 2 assert_equal_(rt_square, np_square) assert isinstance(rt_square, type(np_square)) rt_sqrt = np.sqrt(rt_double) np_sqrt = np.sqrt(np_double) assert_equal_(rt_sqrt, np_sqrt) isinstance(rt_sqrt, type(np_sqrt)) @given( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas(), order=sampled_from(("F", "C")), ) def test_empty(self, shape, dtype, order): # Empty does not initialize the values in the array, so it cannot be compared to a numpy array element-wise rt_empty = rt.empty(shape=shape, dtype=dtype, order=order) assert isinstance(rt_empty, FastArray) assert_equal_(shape, rt_empty.shape) assert_equal_(dtype.type, rt_empty.dtype.type) # 1-D arrays always use Column-order. Otherwise, use the order specified if len(rt_empty.shape) > 1 and min(rt_empty.shape) > 1: assert np.isfortran(rt_empty) == (order == "F") else: assert not np.isfortran(rt_empty) # TODO pull the non-subok parts so these are running tests @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6563 riptable does not implement subok" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ), dtype=ints_floats_or_complex_dtypes(), order=sampled_from(("C", "F", "K", "A")), subok=booleans(), ) def test_empty_like(self, arr, dtype, order, subok): # TODO argument sweep rt_empty_like = rt.empty_like(arr, dtype=dtype, order=order, subok=subok) assert_equal_(dtype, rt_empty_like.dtype) assert_equal_(rt_empty_like.shape, arr.shape) # 1-D arrays always use Column-order. 
Otherwise, use the order specified if ( len(rt_empty_like.shape) > 1 and min(rt_empty_like.shape) > 1 and (order == "F" or ((order == "A" or order == "K") and np.isfortran(arr))) ): assert np.isfortran(rt_empty_like) else: assert not np.isfortran(rt_empty_like) if subok: assert isinstance(rt_empty_like, FastArray) else: assert isinstance(rt_empty_like, np.ndarray) # FastArray is a subclass of np.ndarray, so also ensure it is not a FastArray assert not isinstance(rt_empty_like, FastArray) @given(examples=generate_sample_test_floats()) def test_float32(self, examples): for i in examples: rt_float32 = rt.float32(i) np_float32 = np.float32(i) assert_equal_(rt_float32, np_float32) assert isinstance(rt_float32, type(np_float32)) rt_plus = rt_float32 + 1 np_plus = np_float32 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_float32 - 1 np_minus = np_float32 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) rt_square = rt_float32 ** 2 np_square = np_float32 ** 2 assert_equal_(rt_square, np_square) assert isinstance(rt_square, type(np_square)) rt_sqrt = np.sqrt(rt_float32) np_sqrt = np.sqrt(np_float32) assert_equal_(rt_sqrt, np_sqrt) assert isinstance(rt_sqrt, type(np_sqrt)) @given(examples=generate_sample_test_floats()) def test_float64(self, examples): for i in examples: rt_float64 = rt.float64(i) np_float64 = np.float64(i) assert_equal_(rt_float64, np_float64) assert isinstance(rt_float64, type(np_float64)) rt_plus = rt_float64 + 1 np_plus = np_float64 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_float64 - 1 np_minus = np_float64 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) rt_square = rt_float64 ** 2 np_square = np_float64 ** 2 assert_equal_(rt_square, np_square) assert isinstance(rt_square, type(np_square)) rt_sqrt = np.sqrt(rt_float64) np_sqrt = np.sqrt(np_float64) assert_equal_(rt_sqrt, np_sqrt) assert isinstance(rt_sqrt, type(np_sqrt)) @pytest.mark.xfail( reason="Related to https://jira/browse/SOQTEST-6478 different endiannesses not handled" ) @given( data=data(), arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(sizes=(32, 64)) ), ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) def test_floor_array(self, data, arr): # TODO: Modify this to use the 'generate_array_and_where' strategy instead? where_arr = data.draw(arrays(shape=arr.shape, dtype=boolean_dtypes())) # Test #1: Returns new array containing results; no optional parameters provided. rt_output = rt.floor(arr) np_output = np.floor(arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #2: Returns new array containing results; 'where' bitmask is provided. rt_output = rt.floor(arr, where=where_arr) np_output = np.floor(arr, where=where_arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #3: Results written to array specified in 'out' parameter; no other optional params. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.floor(arr, out=rt_inplace) np_output = np.floor(arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? 
# assert isinstance(rt_output, FastArray, msg="riptable.floor() did not return a FastArray") # Test #4: Results written to array specified in 'out' parameter; 'where' bitmask is provided. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.floor(arr, where=where_arr, out=rt_inplace) np_output = np.floor(arr, where=where_arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? # assert isinstance(rt_output, FastArray, msg="riptable.floor() did not return a FastArray") @given(scalar=floating_scalar()) def test_floor_scalar(self, scalar): # test scalars rt_floor_scalar = rt.floor(scalar) np_floor_scalar = np.floor(scalar) assert_equal_(np_floor_scalar, rt_floor_scalar) @given( shape=ndarray_shape_strategy(), fill_value=ints_or_floats_example(), dtype=ints_or_floats_dtypes(), order=sampled_from(("C", "F")), ) def test_full(self, shape, fill_value, dtype, order): rt_err_type = np_err_type = rt_full = np_full = None try: rt_full = rt.full(shape, fill_value, dtype=dtype, order=order) except Exception as e: rt_err_type = type(e) try: np_full = np.full( shape=shape, fill_value=fill_value, dtype=dtype, order=order ) except Exception as e: np_err_type = type(e) if rt_err_type and np_err_type: assert rt_err_type == np_err_type else: assert_array_equal_(np.array(rt_full), np_full) assert isinstance(rt_full, FastArray) assert np.isfortran(rt_full) == np.isfortran(np_full) @given( shape=ndarray_shape_strategy(), fill_value=text(), order=sampled_from(("C", "F")), ) def test_full_str(self, shape, fill_value, order): rt_full = rt.full(shape, fill_value, order=order) np_full = np.full(shape, fill_value, order=order) assert_array_equal_(np.array(rt_full), np_full) assert isinstance(rt_full, FastArray) assert np.isfortran(rt_full) == np.isfortran(np_full) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6495 kwargs not implemented in riptable.full()", raises=ValueError, ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given( shape=ndarray_shape_strategy(), fill_value=ints_or_floats_example(), dtype=ints_or_floats_dtypes(), order=sampled_from(("C", "F")), ) def test_full_kwargs(self, shape, fill_value, dtype, order): rt_err_type = np_err_type = rt_full = np_full = None try: rt_full = rt.full( shape=shape, fill_value=fill_value, dtype=dtype, order=order ) except Exception as e: rt_err_type = type(e) try: np_full = np.full( shape=shape, fill_value=fill_value, dtype=dtype, order=order ) except Exception as e: np_err_type = type(e) if rt_err_type and np_err_type: assert rt_err_type == np_err_type else: assert_array_equal_(np.array(rt_full), np_full) assert isinstance(rt_full, FastArray) assert np.isfortran(rt_full) == np.isfortran(np_full) @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow]) @given(tuple_of_arrays=generate_tuples_of_arrays(all_same_height=True)) def test_hstack(self, tuple_of_arrays): rt_err_type = np_err_type = rt_hstack = np_hstack = None try: rt_hstack = rt.hstack(tuple_of_arrays) except Exception as e: rt_err_type = type(e) try: np_hstack = np.hstack(tuple_of_arrays) except Exception as e: np_err_type = type(e) if rt_err_type and np_err_type: assert rt_err_type == np_err_type else: assert isinstance(rt_hstack, FastArray) assert_array_equal_(np.array(rt_hstack), np_hstack) @given(examples=generate_sample_test_integers(num_bits=8, signed=True)) def test_int8(self, examples): for i in examples: rt_int8 = rt.int8(i) np_int8 = np.int8(i) assert_equal_(rt_int8, np_int8) assert isinstance(rt_int8, type(np_int8)) rt_plus = rt_int8 + 1 np_plus = np_int8 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_int8 - 1 np_minus = np_int8 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=16, signed=True)) def test_int16(self, examples): for i in examples: rt_int16 = rt.int16(i) np_int16 = np.int16(i) assert_equal_(rt_int16, np_int16) assert isinstance(rt_int16, type(np_int16)) rt_plus = rt_int16 + 1 np_plus = np_int16 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_int16 - 1 np_minus = np_int16 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=32, signed=True)) def test_int32(self, examples): for i in examples: rt_err = None np_err = None try: rt_int32 = rt.int32(i) except BaseException as e: rt_err = str(e) try: np_int32 = np.int32(i) except BaseException as e: np_err = str(e) if rt_err or np_err: assert rt_err == np_err else: assert_equal_(rt_int32, np_int32) assert isinstance(rt_int32, type(np_int32)) rt_plus = rt_int32 + 1 np_plus = np_int32 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_int32 - 1 np_minus = np_int32 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=64, signed=True)) def test_int64(self, examples): for i in examples: rt_err = None np_err = None try: rt_int64 = rt.int64(i) except BaseException as e: rt_err = str(e) try: np_int64 = np.int64(i) except BaseException as e: np_err = str(e) if rt_err or np_err: assert rt_err == np_err else: assert_equal_(rt_int64, np_int64) assert isinstance(rt_int64, type(np_int64)) rt_plus = rt_int64 + 1 np_plus = np_int64 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_int64 - 1 np_minus = np_int64 - 1 
assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(arg_dict=interpolation_data()) def test_interp(self, arg_dict): rt_err = np_err = rt_interp = np_interp = None try: rt_interp = rt.interp( arg_dict["x"], arg_dict["xp"], arg_dict["fp"] ) # According to the documentation, Riptable # does not implement kwargs left, right, # and period except Exception as e: rt_err = e try: np_interp = np.interp(arg_dict["x"], arg_dict["xp"], arg_dict["fp"]) except Exception as e: np_err = e if rt_err or np_err: assert rt_err == np_err else: assert_equal_(rt_interp.dtype.name, np_interp.dtype.name) assert_allclose(np.array(rt_interp), np_interp) assert isinstance(rt_interp, FastArray) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ) ) def test_isfinite(self, arr): rt_isfinite = rt.isfinite(arr) np_isfinite = np.isfinite(arr) assert_array_equal_(rt_isfinite, np_isfinite) assert isinstance(rt_isfinite, FastArray) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ) ) def test_isinf(self, arr): rt_isinf = rt.isinf(arr) np_isinf = np.isinf(arr) assert_array_equal_(rt_isinf, np_isinf) assert isinstance(rt_isinf, FastArray) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ) ) def test_isnan(self, arr): rt_isnan = rt.isnan(arr) np_isnan = np.isnan(arr) assert_array_equal_(rt_isnan, np_isnan) assert isinstance(rt_isnan, FastArray) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_basic(self, data): shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), shared(ints_floats_datetimes_and_timedeltas()), ) a = data.draw(arrays(shape=shape, dtype=dtype)) b = data.draw(arrays(shape=shape, dtype=dtype)) assert_array_equal_(np.lexsort((b, a)), rt.lexsort((list(b), list(a)))) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_basic_fail(self, data): shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), shared(ints_or_floats_dtypes()), ) a = data.draw(arrays(shape=shape, dtype=dtype)) b = data.draw(arrays(shape=shape, dtype=dtype)) rt_lexsort_args = [(FA(b), FA(a)), (FA(b), a), (b, FA(a)), (b, a)] for rt_arg in rt_lexsort_args: assert_array_equal_(np.lexsort((b, a)), rt.lexsort(rt_arg)) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_mixed(self, data): shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), ints_floats_datetimes_and_timedeltas(), ) a = data.draw(arrays(shape=shape, dtype=dtype)) b = data.draw(arrays(shape=shape, dtype=dtype)) assert_array_equal_(np.lexsort((b, a)), rt.lexsort((list(b), list(a)))) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_mixed_fail(self, data): shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), ints_floats_datetimes_and_timedeltas(), ) a = data.draw(arrays(shape=shape, dtype=dtype)) b = 
data.draw(arrays(shape=shape, dtype=dtype)) rt_lexsort_args = [(FA(b), FA(a)), (FA(b), a), (b, FA(a)), (b, a)] for rt_arg in rt_lexsort_args: assert_array_equal_(np.lexsort((b, a)), rt.lexsort(rt_arg)) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_many_columns(self, data): k = data.draw(integers(min_value=1, max_value=_MAX_SHAPE_SIZE // 10)) shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), shared(integer_dtypes(endianness="=")), ) keys: List[List[int]] = list() for _ in range(k): keys.append(list(data.draw(arrays(shape=shape, dtype=dtype)))) assert_array_equal_(np.lexsort(keys), rt.lexsort(keys)) @pytest.mark.xfail( reason="RIP-339: lexsort broken for ndarray and FastArray input types" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Performance issues when running on TeamCity" ) @given(data()) def test_lexsort_many_columns_fail(self, data): k = data.draw(integers(min_value=1, max_value=_MAX_SHAPE_SIZE // 10)) shape, dtype = ( shared(ndarray_shape_strategy(max_rank=1)), shared(integer_dtypes(endianness="=")), ) keys: List[FastArray] = list() for _ in range(k): keys.append(FA(data.draw(arrays(shape=shape, dtype=dtype)))) assert_array_equal_(np.lexsort(keys), rt.lexsort(keys)) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ) ) def test_log_array(self, arr): rt_log = rt.log(arr) np_log = np.log(arr) assert_allclose(np.array(rt_log), np_log, rtol=1e-6) assert isinstance(rt_log, FastArray) @given(scalar=floats()) def test_log_scalar(self, scalar): rt_log = rt.log(scalar) np_log = np.log(scalar) assert_allclose(rt_log, np_log, rtol=1e-6) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ) ) def test_log10_array(self, arr): rt_log10 = rt.log10(arr) np_log10 = np.log10(arr) assert_allclose(np.array(rt_log10), np_log10, rtol=1e-6) assert isinstance(rt_log10, FastArray) @given(scalar=floats()) def test_log10_scalar(self, scalar): rt_log10 = rt.log10(scalar) np_log10 = np.log10(scalar) assert_allclose(rt_log10, np_log10, rtol=1e-6) @pytest.mark.xfail( reason="This test exposes a known bug around NaN-handling in 'max' that needs to be fixed." ) @given( arr_axis_tuple=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ) ) @given(mats=same_structure_ndarrays()) def test_maximum(self, mats): mat1, mat2 = mats np_output = np.maximum(mat1, mat2) rt_output = rt.maximum(mat1, mat2) assert_array_equal_(np.array(rt_output), np_output) # TODO: Enable this assertion once we've fixed riptable so rt.maximum returns a FastArray. # assert isinstance(rt_output, FastArray) @given( arr_axis_tuple=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ) ) def test_min(self, arr_axis_tuple): arr, axis = arr_axis_tuple rt_output = rt.min(arr, axis=axis) np_output = np.min(arr, axis=axis) assert_array_equal_(np.array(rt_output), np_output) # TODO: Enable this assertion once we've fixed riptable so rt.maximum returns a FastArray. 
# if axis: #if axis is None, it will return a scalar # assert isinstance(rt_output, FastArray) @pytest.mark.parametrize( "func", [ # TODO add params: nanmean, nansum, nanstd, nanvar, nanargmin, nanargmax # TODO add other missing array methods, then dynamically generate these functions from introspection of numpy and riptable # List of ndarray array methods: https://docs.scipy.org/doc/numpy/reference/arrays.ndarray.html#array-methods "abs", "all", "any", "argmax", "argmin", "argsort", # TODO add cumprod "cumsum", "mean", "median", "nanmedian", "percentile", "std", "var", pytest.param( "sum", marks=[ pytest.mark.xfail( reason="Need to investigate intermittent failures possibly due to roundoff errors." ) ], ), pytest.param( "nanmax", marks=[ pytest.mark.xfail( reason="RIP-417: This test exposes a known bug around NaN-handling in 'nanmax' that needs to be fixed." ) ], ), pytest.param( "nanmin", marks=[ pytest.mark.xfail( reason="RIP-417: This test exposes a known bug around NaN-handling in 'nanmin' that needs to be fixed." ) ], ), pytest.param( "max", marks=[ pytest.mark.xfail( reason="Conflict in comparing FastArray with invalids that don't exist as ndarray invalids. Need to handle invalid assertion checks." ) ], ), pytest.param( "nanpercentile", marks=[ pytest.mark.xfail( reason="Investigate frequent TeamCity hypothesis DeadlineExceeded exceptions that never occur on local runs" ) ], ), ], ) @pytest.mark.parametrize( "include_invalid", [ False ], # TODO enable including invalids and use masked arrays to check valid and possibly invalid values ) def test_array_methods(self, func, include_invalid): # TODO rework so the generated array and axis can be cached and reused, otherwise most of the time spent for this test is generating arrays and an axis # Attempted to use the hypothesis cacheable decorator, but it doesn't cache the strategy for reuse # Investigate reworking generate_array_and_axis by using the defines_strategy_with_reusable_values decorator @given( arr_axis_tuple=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes(), include_invalid=include_invalid, ) ) def inner(arr_axis_tuple): arr, axis = arr_axis_tuple # TODO enable this assertion for guarded funcs once we've fixed riptable so these return a fastarray. def _check_fastarray(inst): if ( axis ): # need an axis to check if output is a FastArray, otherwise output is a scalar assert isinstance(inst, rt.FastArray) args = (func, arr) kwargs = {} if func not in ["abs"]: kwargs["axis"] = axis # set up keyword arguments needed for some of the rt_numpy functions if "std" in func or "var" in func: kwargs["ddof"] = 1 if "percentile" in func: kwargs["interpolation"] = "linear" kwargs["q"] = 50 if "argsort" == func: kwargs["kind"] = "stable" # Test #1: Check calling .func() as a module function in numpy vs. riptable. rt_output = operator.methodcaller(*args, **kwargs)(rt) np_output = operator.methodcaller(*args, **kwargs)(np) assert_array_equal_(rt_output, np_output) # Test #2: Check calling .func() as a module function in numpy vs. riptable. # This time, explicitly wrap the ndarray generated by hypothesis in a FastArray # so we're sure to get the FastArray implementation. 
f_arr = rt.FA(arr) args = (func, f_arr) rt_output = operator.methodcaller(*args, **kwargs)(rt) np_output = operator.methodcaller(*args, **kwargs)(np) assert_array_equal_(rt_output, np_output) # _check_fastarray(rt_output) # rt_output should be a FastArray, but is a ndarray # Test #3: Call the .func() method on both the FastArray and the ndarray. # Either FastArray or ndarray does not support the guarded member names. common_members, _ = _NDARRAY_FASTARRAY_COMMON_AND_DIFF if func in common_members: rt_output = operator.methodcaller(func, **kwargs)(f_arr) np_output = operator.methodcaller(func, **kwargs)(arr) assert_array_equal_(rt_output, np_output) # _check_fastarray(rt_output) # rt_output should be a FastArray, but is a ndarray inner() @given(arrs=sampled_from([[9, 8, 7], [[10, 1],[11, 2],[13, 3]]]), inds=just([False, True, False])) def test_array_boolean_indexing(self, arrs, inds): np_array = np.asfortranarray(arrs) rt_array = rt.FA(np_array) np_bool_indices = inds rt_bool_indices = rt.FA(np_bool_indices) nr = np_array[np_bool_indices] fr = rt_array[rt_bool_indices] assert_array_equal_(np.array(fr), nr) @given(mats=same_structure_ndarrays()) def test_minimum(self, mats): mat1, mat2 = mats np_output = np.minimum(mat1, mat2) rt_output = rt.minimum(mat1, mat2) assert_array_equal_(np.array(rt_output), np_output) # TODO: Enable this assertion once we've fixed riptable so rt.minimum returns a FastArray. # assert isinstance(rt_output, FastArray) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6497 Riptable.nan_to_num() does not return a FastArray", raises=AssertionError, ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given(array=arrays(shape=ndarray_shape_strategy(), dtype=floating_dtypes())) def test_nan_to_num_array(self, array): rt_nan_to_num = rt.nan_to_num(array) np_nan_to_num = np.nan_to_num(array) assert_allclose(rt_nan_to_num, np_nan_to_num) assert isinstance(rt_nan_to_num, FastArray) @given(scalar=floats()) def test_nan_to_num_scalar(self, scalar): rt_nan_to_num = rt.nan_to_num(scalar) np_nan_to_num = np.nan_to_num(scalar) assert_allclose(rt_nan_to_num, np_nan_to_num) @given( arr=arrays(shape=ndarray_shape_strategy(), dtype=floating_dtypes()), q=floats(min_value=0, max_value=100), ) def test_nanpercentile_single(self, arr, q): interpolations = ["linear", "lower", "higher", "midpoint", "nearest"] for interpolation in interpolations: # todo - rt_nanpercentile should come from riptable rt_nanpercentile = np.nanpercentile(arr, q=q, interpolation=interpolation) np_nanpercentile = np.nanpercentile(arr, q=q, interpolation=interpolation) assert_allclose(rt_nanpercentile, np_nanpercentile) # @given(arr=arrays(shape=ndarray_shape_strategy(), dtype=floating_dtypes()), q=floats(min_value=0, max_value=100)) # def test_nanpercentile_single(self, arr, q): # interpolations = ['linear', 'lower', 'higher', 'midpoint', 'nearest'] # for interpolation in interpolations: # # todo - rt_nanpercentile should come from riptable # rt_nanpercentile = np.nanpercentile(arr, q=q, interpolation=interpolation) # np_nanpercentile = np.nanpercentile(arr, q=q, interpolation=interpolation) # assert_allclose(rt_nanpercentile, np_nanpercentile) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6497 Riptable.nanpercentile() does not return a FastArray", raises=AssertionError, ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given( arr=arrays(shape=ndarray_shape_strategy(), dtype=floating_dtypes()), q=lists( floats(min_value=0, max_value=100), min_size=10, max_size=50, unique=True ), ) def test_nanpercentile_array(self, arr, q): interpolations = ["linear", "lower", "higher", "midpoint", "nearest"] for interpolation in interpolations: # todo - rt_nanpercentile should come from riptable rt_output = np.nanpercentile(arr, q=q, interpolation=interpolation) np_output = np.nanpercentile(arr, q=q, interpolation=interpolation) assert_allclose(rt_output, np_output) if len(arr.shape) > 1: assert isinstance(rt_output, FastArray) @pytest.mark.xfail( reason= "https://jira/browse/SOQTEST-6637 Riptable does not convert the array to a FastArray, so it does not guarantee nanstd will be an available attribute\n" "https://jira/browse/SOQTEST-6497 Riptable.nanstd() does not return a FastArray" , raises=(AttributeError, AssertionError), ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given(arr_axis_and_ddof=generate_array_axis_and_ddof()) def test_nanstd(self, arr_axis_and_ddof): """ This function has a different default behavior from numpy. Riptable, like pandas, has a default ddof of 1, while numpy uses 0. """ arr = arr_axis_and_ddof["arr"] axis = arr_axis_and_ddof["axis"] ddof = arr_axis_and_ddof["ddof"] rt_output = rt.nanstd(arr, axis=axis, ddof=ddof) np_output = np.nanstd(arr, axis=axis, ddof=ddof) assert_allclose(rt_output, np_output) if axis and len(arr.shape) > 1: assert isinstance(rt_output, FastArray) @pytest.mark.xfail( reason= "https://jira/browse/SOQTEST-6637 Riptable does not convert the array to a FastArray, so it does not guarantee nansum will be an available attribute\n" "nansum does not implement the axis argument either", ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( array_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(max_rank=1), dtype=ints_or_floats_dtypes() ) ) def test_nansum(self, array_and_axis): """ riptable.nansum() does not yet implement the axis argument, since riptable is mostly focused on 1D FastArrays. test_nansum_1D only selects arrays that are rank 0 or 1. Riptable's nansum, like nanmean, does not cast the array passed in to a FastArray--when a FastArray is not passed in an Attribute exception occurs because the function tries calling array.nansum(). """ arr, axis = array_and_axis arr = FastArray(arr) rt_output = rt.nansum(arr, axis=axis) np_output = np.nansum(arr, axis=axis) assert_allclose(rt_output, np_output) if axis: assert isinstance(rt_output, FastArray) # The floats should have the same byte-order as the machine at the moment (https://jira/browse/SOQTEST-6478) -> endianness="=" # and riptable only officially supports float32 and float64 floating-point datatypes, so using other ones occasionally # leads to errors. # Split up ints and floats to ensure the same datatypes for widening. (int->int64, float->float64) @pytest.mark.skip( reason="When the array has very large numbers and numbers closed to zero, numpy and riptable can produce different results." ) @given( arr=arrays( shape=ndarray_shape_strategy(max_rank=1), dtype=floating_dtypes(endianness="=", sizes=(32, 64)), ) ) def test_nansum_1D_float(self, arr): arr = FastArray(arr) # TODO: numpy is not widening the output array as expected to 64 bits all the time, so it is being forced here. 
rt_output = rt.nansum(arr, axis=None, dtype=np.float64) np_output = np.nansum(arr, axis=None, dtype=np.float64) assert_allclose(rt_output, np_output) # The floats should have the same byte-order as the machine at the moment (https://jira/browse/SOQTEST-6478) -> endianness="=" # Split up ints and floats to ensure the same datatypes for widening. (int->int64, float->float64) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6670 Nansum does not follow the dtype argument and returns a float64" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays( shape=ndarray_shape_strategy(max_rank=1), dtype=integer_dtypes(endianness="="), ) ) def test_nansum_1D_int(self, arr): arr = FastArray(arr) # TODO: numpy is not widening the output array as expected to 64 bits all the time, so it is being forced here. rt_output = rt.nansum(arr, axis=None, dtype=np.int64) np_output = np.nansum(arr, axis=None, dtype=np.int64) assert rt_output.dtype.type == np.int64 assert_allclose(rt_output, np_output) @pytest.mark.xfail( reason= "https://jira/browse/SOQTEST-6637 Riptable does not convert the array to a FastArray, so it does not guarantee nanvar will be an available attribute\n" "https://jira/browse/SOQTEST-6497 Riptable.nanvar() does not return a FastArray" , raises=(AttributeError, AssertionError), ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given(arr_axis_and_ddof=generate_array_axis_and_ddof()) def test_nanvar(self, arr_axis_and_ddof): arr = arr_axis_and_ddof["arr"] axis = arr_axis_and_ddof["axis"] ddof = arr_axis_and_ddof["ddof"] rt_output = rt.nanvar(arr, axis=axis, ddof=ddof) np_output = np.nanvar(arr, axis=axis, ddof=ddof) assert_allclose(rt_output, np_output) if axis and len(arr.shape) > 1: assert isinstance(rt_output, FastArray) @given( shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes(), order=sampled_from(("F", "C")), ) def test_ones(self, shape, dtype, order): rt_ones = rt.ones(shape, dtype, order) np_ones = np.ones(shape=shape, dtype=dtype, order=order) assert isinstance(rt_ones, FastArray) assert_equal_(shape, rt_ones.shape) assert_equal_(dtype.type, rt_ones.dtype.type) assert_array_equal_(rt_ones, np_ones) # 1-D arrays always use Column-order. Otherwise, use the order specified if len(rt_ones.shape) > 1 and min(rt_ones.shape) > 1: assert np.isfortran(rt_ones) == (order == "F") else: assert not np.isfortran(rt_ones) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6495 kwargs not implemented in riptable.ones" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes(), order=sampled_from(("F", "C")), ) def test_ones_kwargs(self, shape, dtype, order): rt_ones = rt.ones(shape=shape, dtype=dtype, order=order) np_ones = np.ones(shape=shape, dtype=dtype, order=order) assert isinstance(rt_ones, FastArray) assert_equal_(shape, rt_ones.shape) assert_equal_(dtype.type, rt_ones.dtype.type) assert_array_equal_(rt_ones, np_ones) # 1-D arrays always use Column-order. Otherwise, use the order specified if len(rt_ones.shape) > 1 and min(rt_ones.shape) > 1: assert np.isfortran(rt_ones) == (order == "F") else: assert not np.isfortran(rt_ones) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6563 riptable does not implement subok" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ), dtype=ints_floats_or_complex_dtypes(), order=sampled_from(("C", "F", "K", "A")), subok=booleans(), ) def test_ones_like(self, arr, dtype, order, subok): rt_ones_like = rt.ones_like(arr, dtype=dtype, order=order, subok=subok) np_ones_like = np.ones_like(arr, dtype=dtype, order=order, subok=subok) assert_equal_(dtype, rt_ones_like.dtype) assert_equal_(rt_ones_like.shape, arr.shape) assert_array_equal_(rt_ones_like, np_ones_like) # 1-D arrays always use Column-order. Otherwise, use the order specified if ( len(rt_ones_like.shape) > 1 and min(rt_ones_like.shape) > 1 and (order == "F" or ((order == "A" or order == "K") and np.isfortran(arr))) ): assert np.isfortran(rt_ones_like) else: assert not np.isfortran(rt_ones_like) if subok: assert isinstance(rt_ones_like, FastArray) else: assert isinstance(rt_ones_like, np.ndarray) # FastArray is a subclass of np.ndarray, so also ensure it is not a FastArray assert not isinstance(rt_ones_like, FastArray) @given( arr=arrays(shape=ndarray_shape_strategy(), dtype=floating_dtypes()), q=floats(min_value=0, max_value=100), ) def test_percentile_single(self, arr, q): interpolations = ["linear", "lower", "higher", "midpoint", "nearest"] for interpolation in interpolations: # todo - rt_percentile should come from riptable rt_output = np.percentile(arr, q=q, interpolation=interpolation, axis=None) np_output = np.percentile(arr, q=q, interpolation=interpolation, axis=None) assert_allclose(rt_output, np_output) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6497 Riptable.percentile() does not return a FastArray", raises=AssertionError, ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( array_and_axis=generate_array_and_axis( shape=ndarray_shape_strategy(), dtype=floating_dtypes() ), q=lists( floats(min_value=0, max_value=100), min_size=10, max_size=50, unique=True ), ) def test_percentile_array(self, array_and_axis, q): arr, axis = array_and_axis interpolations = ["linear", "lower", "higher", "midpoint", "nearest"] for interpolation in interpolations: rt_output = rt.percentile(arr, q=q, interpolation=interpolation, axis=axis) np_output = np.percentile(arr, q=q, interpolation=interpolation, axis=axis) assert_allclose(rt_output, np_output) if axis and len(arr.shape) > 1: assert isinstance(rt_output, FastArray) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6497 Riptable.putmask does not cast input to a FastArray", raises=AssertionError, ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( array_and_mask=generate_array_and_where( shape=ndarray_shape_strategy(), dtype=floating_dtypes(endianness="=") ), rep_val=floats(), ) def test_putmask(self, array_and_mask, rep_val): arr, mask = array_and_mask rt_err = None rt_putmask = arr.copy() np_err = None np_putmask = arr.copy() try: rt.putmask(rt_putmask, mask, rep_val) except OverflowError: rt_err = OverflowError try: np.putmask(np_putmask, mask, rep_val) except OverflowError: np_err = OverflowError if rt_err or np_err: assert rt_err == np_err else: assert_allclose(rt_putmask, np_putmask) assert isinstance(rt_putmask, FastArray) @pytest.mark.xfail( reason="RIP-341: quantile's _get_score raises an index error when getting the view from _val and using it to access the ndarray." ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given( arr=arrays( shape=one_darray_shape_strategy(), dtype=floating_dtypes(sizes=(32, 64)) ) ) def test_quantile(self, arr): # For a one to one equivalence of supported interpolation methods between numpy and riptable: # 1) np.quantile would need to support riptable's fraction method # 2) rt.quantile would need to support numpy's linear, midpoint, and nearest methods # Numpy quantile reference https://docs.scipy.org/doc/numpy/reference/generated/numpy.quantile.html # Tracked in Jira https://jira/browse/RIP-344 interpolations = ["lower", "higher"] for interpolation in interpolations: q = random.random() rt_quantile, _, _ = rt.quantile(arr, q, interpolation) np_quantile = np.quantile(arr, q, interpolation=interpolation) assert_allclose( rt_quantile, np_quantile, err_msg=f"to reproduce use quantile {q}" ) @given( arr=arrays(shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes()), rep=integers(min_value=0, max_value=10), axis=sampled_from((None, 0, 1)), ) def test_repeat_integer_repeats(self, arr, rep, axis): if axis == 1 and len(arr.shape) == 1: axis = 0 rt_repeat = rt.repeat(arr, repeats=rep, axis=axis) np_repeat = np.repeat(arr, repeats=rep, axis=axis) assert_allclose(np.array(rt_repeat), np_repeat) @given( arr_axis_and_rep=generate_array_axis_and_repeats_array( dtype=ints_or_floats_dtypes(), shape=ndarray_shape_strategy() ) ) def test_repeat_array_repeats(self, arr_axis_and_rep): arr, axis, rep = arr_axis_and_rep rt_repeat = rt.repeat(arr, repeats=rep, axis=axis) np_repeat = np.repeat(arr, repeats=rep, axis=axis) assert_allclose(np.array(rt_repeat), np_repeat) assert isinstance(rt_repeat, FastArray) @given( arr_and_dim=generate_reshape_array_and_shape_strategy( shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes() ) ) def test_reshape(self, arr_and_dim): arr, dim = arr_and_dim orders = ["C", "F", "A"] for order in orders: rt_reshape = rt.reshape(arr, dim, order=order) np_reshape = np.reshape(arr, dim, order=order) assert_allclose(np.array(rt_reshape), np_reshape) assert np.isfortran(rt_reshape) == np.isfortran(np_reshape) assert isinstance(rt_reshape, FastArray) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6530 riptable.round() does not return FastArray and decimals arg not yet implemented" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(sizes=(32, 64)) ) ) def test_round_array(self, arr): # TODO: Use range of decimals once https://jira/browse/SOQTEST-6530 is addressed decimals = [0] # decimals = range(-5, 100) for decimal in decimals: # test array rt_output = rt.round(arr, decimals=decimal) np_output = np.round(arr, decimals=decimal) assert_array_equal_(np.array(rt_output), np_output) assert isinstance(rt_output, FastArray) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6530 should fail on +/-inf, and [-0.5,-0], since riptable.round calls builtin round. Decimals arg also not yet implemented" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." 
) @given(scalar=floating_scalar()) def test_round_scalar(self, scalar): # TODO: Use range of decimals once https://jira/browse/SOQTEST-6530 is addressed decimals = [0] # decimals = range(-5, 100) for decimal in decimals: # test scalars rt_round_scalar = rt.round(scalar, decimals=decimal) np_round_scalar = np.round(scalar, decimals=decimal) assert_equal_( rt_round_scalar, np_round_scalar, err_msg=f"Rounding error on {type(scalar)} {scalar}", ) @pytest.mark.xfail(reason="RIP-345 - discrepancy between inserts position points") @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given(arrays(shape=one_darray_shape_strategy(), dtype=integer_dtypes())) def test_searchsorted(self, arr): sides = {"left", "right"} arr.sort() v = arr[arr % 2 == 0] for side in sides: rt_indicies = rt.searchsorted(arr, v, side) np_indicies = np.searchsorted(arr, v, side) assert_array_equal_( rt_indicies, np_indicies, err_msg=f"using array {arr}\nvalues to insert {v}\nside {side}", ) @pytest.mark.xfail( reason="RIP-350 - see Python/core/riptable/tests/test_base_function.TestStd.test_std for a materialized counterexample" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays( shape=one_darray_shape_strategy(max_shape_size=10), # do not consider overflow; numpy and riptable differ in how they behave on overflow elements=integers(-int(_MAX_INT / 10), int(_MAX_INT / 10)), dtype=integer_dtypes(endianness="=", sizes=(64,)), ) ) def test_std(self, arr): np_std = np.std(arr, ddof=1) rt_std = rt.std(rt.FastArray(arr)) assert_equal_(rt_std, np_std, decimal=6) @given( arr=arrays( shape=one_darray_shape_strategy(max_shape_size=10), # do not consider overflow; numpy and riptable differ in how they behave on overflow elements=integers(-int(_MAX_INT / 10), int(_MAX_INT / 10)), dtype=integer_dtypes(endianness="=", sizes=(64,)), ) ) def test_sum_int(self, arr): np_sum = np.sum(arr) rt_sum = rt.sum(rt.FastArray(arr)) assert_equal_(int(rt_sum), int(np_sum)) @pytest.mark.xfail( reason="RIP-351 - see Python/core/riptable/tests/test_base_function.TestSum.test_sum_float for a materialized counterexample" ) @given( arr=arrays( shape=one_darray_shape_strategy(max_shape_size=10), # do not consider overflow; numpy and riptable differ in how they behave on overflow elements=floats(-int(_MAX_FLOAT / 10), int(_MAX_FLOAT / 10)), dtype=floating_dtypes(endianness="=", sizes=(64,)), ) ) def test_sum_float(self, arr): np_sum = np.sum(arr) rt_sum = rt.sum(rt.FastArray(arr)) assert_equal_(rt_sum, np_sum, decimal=6) @pytest.mark.skip(reason="Resolve DeadlineExceeded errors on workflow runs") @given( arr=arrays( shape=ndarray_shape_strategy(max_shape_size=5), dtype=floating_dtypes(endianness="=", sizes=(32, 64)), ), shape=ndarray_shape_strategy(), ) def test_tile(self, arr, shape): np_result = np.tile(arr, shape) rt_result = rt.tile(arr, shape) assert_array_equal_(rt_result, np_result) @pytest.mark.xfail( reason="https://jira/browse/RIP-358 discrepency between rt.trunc and np.trunc" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( data=data(), arr=arrays( shape=ndarray_shape_strategy(), dtype=floating_dtypes(sizes=(32, 64)) ), ) def test_trunc(self, data, arr): # TODO: Modify this to use the 'generate_array_and_where' strategy instead? where_arr = data.draw(arrays(shape=arr.shape, dtype=boolean_dtypes())) # Test #1: Returns new array containing results; no optional parameters provided. 
rt_output = rt.trunc(arr) np_output = np.trunc(arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #2: Returns new array containing results; 'where' bitmask is provided. rt_output = rt.trunc(arr, where=where_arr) np_output = np.trunc(arr, where=where_arr) assert isinstance(rt_output, FastArray) assert_array_equal_(np.array(rt_output), np_output) # Test #3: Results written to array specified in 'out' parameter; no other optional params. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.trunc(arr, out=rt_inplace) np_output = np.trunc(arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? # assert isinstance(rt_output, FastArray, msg="riptable.trunc() did not return a FastArray") # Test #4: Results written to array specified in 'out' parameter; 'where' bitmask is provided. rt_inplace = np.zeros_like(arr) np_inplace = np.zeros_like(arr) rt_output = rt.trunc(arr, where=where_arr, out=rt_inplace) np_output = np.trunc(arr, where=where_arr, out=np_inplace) assert_array_equal_(np.array(rt_inplace), np_inplace) # TODO: Add assertions for rt_output and np_output -- what are they expected to return when the 'out' parameter is specified? # assert isinstance(rt_output, FastArray, msg="riptable.trunc() did not return a FastArray") @given(scalar=floating_scalar()) def test_trunc_scalar(self, scalar): # test scalars rt_trunc_scalar = rt.trunc(scalar) np_trunc_scalar = np.trunc(scalar) assert_equal_(rt_trunc_scalar, np_trunc_scalar) @given(examples=generate_sample_test_integers(num_bits=8, signed=False)) def test_uint8(self, examples): for i in examples: rt_uint8 = rt.uint8(i) np_uint8 = np.uint8(i) assert_equal_(rt_uint8, np_uint8) assert isinstance(rt_uint8, type(np_uint8)) rt_plus = rt_uint8 + 1 np_plus = np_uint8 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_uint8 - 1 np_minus = np_uint8 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=16, signed=False)) def test_uint16(self, examples): for i in examples: rt_uint16 = rt.uint16(i) np_uint16 = np.uint16(i) assert_equal_(rt_uint16, np_uint16) assert isinstance(rt_uint16, type(np_uint16)) rt_plus = rt_uint16 + 1 np_plus = np_uint16 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_uint16 - 1 np_minus = np_uint16 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=32, signed=False)) def test_uint32(self, examples): for i in examples: rt_uint32 = rt.uint32(i) np_uint32 = np.uint32(i) assert_equal_(rt_uint32, np_uint32) assert isinstance(rt_uint32, type(np_uint32)) rt_plus = rt_uint32 + 1 np_plus = np_uint32 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, type(np_plus)) rt_minus = rt_uint32 - 1 np_minus = np_uint32 - 1 assert_equal_(rt_minus, np_minus) assert isinstance(rt_minus, type(np_minus)) @given(examples=generate_sample_test_integers(num_bits=64, signed=False)) def test_uint64(self, examples): for i in examples: rt_uint64 = rt.uint64(i) np_uint64 = np.uint64(i) assert_equal_(rt_uint64, np_uint64) assert isinstance(rt_uint64, type(np_uint64)) rt_plus = rt_uint64 + 1 np_plus = np_uint64 + 1 assert_equal_(rt_plus, np_plus) assert isinstance(rt_plus, 
                              type(np_plus))
            rt_minus = rt_uint64 - 1
            np_minus = np_uint64 - 1
            assert_equal_(rt_minus, np_minus)
            assert isinstance(rt_minus, type(np_minus))

    @given(
        arr=arrays(
            shape=one_darray_shape_strategy(),
            elements=integers(-_MAX_INT, _MAX_INT),
            dtype=integer_dtypes(endianness="=", sizes=(64,)),
        )
    )
    def test_unique(self, arr):
        rt_result = rt.unique(arr)
        np_result = np.unique(arr)
        assert_array_equal_(
            rt_result,
            np_result,
            err_msg=f"arr dtype {arr.dtype} {arr}\nrt {rt_result}\nnp {np_result}",
        )

    @hypothesis.settings(suppress_health_check=[HealthCheck.too_slow])
    @pytest.mark.xfail(
        reason="https://jira/browse/SOQTEST-6548 Riptable.vstack transposes arrays with shape (x,) and multiple (1,1)s. vstack also does not convert non-numbers to a FastArray"
    )
    @pytest.mark.skipif(
        is_running_in_teamcity(), reason="Please remove alongside xfail removal."
    )
    @given(tuple_of_arrays=generate_tuples_of_arrays(all_same_width=True))
    def test_vstack(self, tuple_of_arrays):
        rt_err_type = None
        np_err_type = None
        rt_vstack = None
        np_vstack = None
        try:
            rt_vstack = rt.vstack(tuple_of_arrays)
        except ValueError:
            rt_err_type = ValueError
        try:
            np_vstack = np.vstack(tuple_of_arrays)
        except ValueError:
            np_err_type = ValueError
        if rt_err_type and np_err_type:
            assert rt_err_type == np_err_type
        else:
            assert isinstance(rt_vstack, FastArray)
            assert_array_equal_(np.array(rt_vstack), np_vstack)

    @pytest.mark.xfail(
        reason="See TestWhere.test_where for a materialized counterexample"
    )
    @pytest.mark.skipif(
        is_running_in_teamcity(), reason="Please remove alongside xfail removal."
    )
    @given(
        arr=arrays(
            shape=ndarray_shape_strategy(),
            dtype=integer_dtypes(endianness="=", sizes=(32, 64)),
        )
    )
    def test_where(self, arr):
        min, mean, max = arr.min(), arr.mean(), arr.max()
        # Break this out into pytest cases; RiptableNumpyEquivalencyTests inherit from unittest
        # which does not allow for pytest.mark.parametrize.
        np_gt_min, rt_gt_min = np.where(arr > min), rt.where(arr > min)
        assert_array_equal_(
            np_gt_min,
            rt_gt_min,
            err_msg=f"array elements greater than the minimum {min}",
        )
        np_lt_mean, rt_lt_mean = np.where(arr < mean), rt.where(arr < mean)
        assert_array_equal_(
            np_lt_mean, rt_lt_mean, err_msg=f"array elements less than the mean {mean}"
        )
        np_gt_mean, rt_gt_mean = np.where(arr > mean), rt.where(arr > mean)
        assert_array_equal_(
            np_gt_mean,
            rt_gt_mean,
            err_msg=f"array elements greater than the mean {mean}",
        )
        np_lt_max, rt_lt_max = np.where(arr < max), rt.where(arr < max)
        assert_array_equal_(
            np_lt_max, rt_lt_max, err_msg=f"array elements less than the max {max}"
        )

    @given(
        shape=ndarray_shape_strategy(),
        dtype=ints_or_floats_dtypes(),
        order=sampled_from(("F", "C")),
    )
    def test_zeros(self, shape, dtype, order):
        rt_zeros = rt.zeros(shape, dtype, order)
        np_zeros = np.zeros(shape=shape, dtype=dtype, order=order)
        assert isinstance(rt_zeros, FastArray)
        assert_equal_(shape, rt_zeros.shape)
        assert_equal_(dtype.type, rt_zeros.dtype.type)
        assert_array_equal_(rt_zeros, np_zeros)
        # 1-D arrays always use Column-order. Otherwise, use the order specified
        if len(rt_zeros.shape) > 1 and min(rt_zeros.shape) > 1:
            assert np.isfortran(rt_zeros) == (order == "F")
        else:
            assert not np.isfortran(rt_zeros)

    @pytest.mark.xfail(
        reason="https://jira/browse/SOQTEST-6495 kwargs not implemented in riptable.zeros"
    )
    @pytest.mark.skipif(
        is_running_in_teamcity(), reason="Please remove alongside xfail removal."
) @given( shape=ndarray_shape_strategy(), dtype=ints_or_floats_dtypes(), order=sampled_from(("F", "C")), ) def test_zeros_kwargs(self, shape, dtype, order): rt_zeros = rt.zeros(shape=shape, dtype=dtype, order=order) np_zeros = np.zeros(shape=shape, dtype=dtype, order=order) assert isinstance(rt_zeros, FastArray) assert_equal_(shape, rt_zeros.shape) assert_equal_(dtype.type, rt_zeros.dtype.type) assert_array_equal_(rt_zeros, np_zeros) # 1-D arrays always use Column-order. Otherwise, use the order specified if len(rt_zeros.shape) > 1 and min(rt_zeros.shape) > 1: assert np.isfortran(rt_zeros) == (order == "F") else: assert not np.isfortran(rt_zeros) @pytest.mark.xfail( reason="https://jira/browse/SOQTEST-6563 riptable does not implement subok" ) @pytest.mark.skipif( is_running_in_teamcity(), reason="Please remove alongside xfail removal." ) @given( arr=arrays( shape=ndarray_shape_strategy(), dtype=ints_floats_datetimes_and_timedeltas() ), dtype=ints_floats_or_complex_dtypes(), order=sampled_from(("C", "F", "K", "A")), subok=booleans(), ) def test_zeros_like(self, arr, dtype, order, subok): rt_zeros_like = rt.zeros_like(arr, dtype=dtype, order=order, subok=subok) np_zeros_like = np.zeros_like(arr, dtype=dtype, order=order, subok=subok) assert_equal_(dtype, rt_zeros_like.dtype) assert_equal_(rt_zeros_like.shape, arr.shape) assert_array_equal_(rt_zeros_like, np_zeros_like) # 1-D arrays always use Column-order. Otherwise, use the order specified if ( len(rt_zeros_like.shape) > 1 and min(rt_zeros_like.shape) > 1 and (order == "F" or ((order == "A" or order == "K") and np.isfortran(arr))) ): assert np.isfortran(rt_zeros_like) else: assert not np.isfortran(rt_zeros_like) if subok: assert isinstance(rt_zeros_like, FastArray) else: assert isinstance(rt_zeros_like, np.ndarray) # FastArray is a subclass of np.ndarray, so also ensure it is not a FastArray assert not isinstance(rt_zeros_like, FastArray)
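# The test_uint8/16/32/64 methods above differ only in the bit width and the
# scalar constructors.  Below is a minimal sketch of folding them into a single
# parametrized check (kept out of pytest collection by omitting the test_ prefix;
# rename it to opt in).  It reuses generate_sample_test_integers, assert_equal_,
# data and given already imported in this module; @pytest.mark.parametrize and
# hypothesis's @given can be stacked, with the parametrized arguments kept
# separate from the drawn `data` argument.
@pytest.mark.parametrize(
    "num_bits, rt_type, np_type",
    [(8, rt.uint8, np.uint8), (16, rt.uint16, np.uint16),
     (32, rt.uint32, np.uint32), (64, rt.uint64, np.uint64)],
)
@given(data=data())
def uint_equivalence_sketch(data, num_bits, rt_type, np_type):
    examples = data.draw(generate_sample_test_integers(num_bits=num_bits, signed=False))
    for i in examples:
        rt_val, np_val = rt_type(i), np_type(i)
        assert_equal_(rt_val, np_val)
        assert isinstance(rt_val, type(np_val))
        rt_plus, np_plus = rt_val + 1, np_val + 1
        assert_equal_(rt_plus, np_plus)
        assert isinstance(rt_plus, type(np_plus))
        rt_minus, np_minus = rt_val - 1, np_val - 1
        assert_equal_(rt_minus, np_minus)
        assert isinstance(rt_minus, type(np_minus))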
{"hexsha": "1a0fc5a4210df0aa4ffd6e8e5c14f934dc0782b5", "size": 88713, "ext": "py", "lang": "Python", "max_stars_repo_path": "riptable/hypothesis_tests/test_riptide_numpy_equivalency.py", "max_stars_repo_name": "wenjuno/riptable", "max_stars_repo_head_hexsha": "7d519f6838b061dc66e38a7c2a54a1af9bbe4bb7", "max_stars_repo_licenses": ["BSD-2-Clause-Patent"], "max_stars_count": 307, "max_stars_repo_stars_event_min_datetime": "2020-08-27T20:25:11.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-08T15:51:19.000Z", "max_issues_repo_path": "riptable/hypothesis_tests/test_riptide_numpy_equivalency.py", "max_issues_repo_name": "wenjuno/riptable", "max_issues_repo_head_hexsha": "7d519f6838b061dc66e38a7c2a54a1af9bbe4bb7", "max_issues_repo_licenses": ["BSD-2-Clause-Patent"], "max_issues_count": 206, "max_issues_repo_issues_event_min_datetime": "2020-08-17T19:07:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-18T11:53:55.000Z", "max_forks_repo_path": "riptable/hypothesis_tests/test_riptide_numpy_equivalency.py", "max_forks_repo_name": "wenjuno/riptable", "max_forks_repo_head_hexsha": "7d519f6838b061dc66e38a7c2a54a1af9bbe4bb7", "max_forks_repo_licenses": ["BSD-2-Clause-Patent"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2020-08-28T00:22:05.000Z", "max_forks_repo_forks_event_max_datetime": "2021-04-30T20:22:28.000Z", "avg_line_length": 41.5907172996, "max_line_length": 866, "alphanum_fraction": 0.6174855996, "include": true, "reason": "import numpy,from numpy", "num_tokens": 20690}
abstract type Barrier end function drop(this::Barrier) this.mEnabled = false end struct BufferBarrier <: Barrier mEnabled::Bool mTarget::vk.VkCommandBuffer mSrcStage::vk.VkPipelineStageFlags mDstStage::vk.VkPipelineStageFlags mHandle::vk.VkBufferMemoryBarrier function BufferBarrier(cmd::RecordingCommandBuffer, buffer::vk.VkBuffer; srcStage::vk.VkPipelineStageFlags = 0, dstStage::vk.VkPipelineStageFlags = 0, srcAccessMask::vk.VkAccessFlags = 0, dstAccessMask::vk.VkAccessFlags = 0) this = new(true, cmd.getBuffer().handle(), srcStage, dstStage, vk.VkBufferMemoryBarrier( vk.VK_STRUCTURE_TYPE_BUFFER_MEMORY_BARRIER, # sType::VkStructureType C_NULL, # pNext::Ptr{Cvoid} srcAccessMask, # srcAccessMask::VkAccessFlags dstAccessMask, # dstAccessMask::VkAccessFlags vk.VK_QUEUE_FAMILY_IGNORED, # srcQueueFamilyIndex::UInt32 vk.VK_QUEUE_FAMILY_IGNORED, # dstQueueFamilyIndex::UInt32 buffer, # buffer::VkBuffer 0, # offset::VkDeviceSize vk.VK_WHOLE_SIZE, # size::VkDeviceSize )) return this end end @class BufferBarrier function apply(this::BufferBarrier) if this.mEnabled vk.vkCmdPipelineBarrier(this.mTarget, this.mSrcStage, this.mDstStage, 0, # dependencyFlags 0, # memoryBarrierCount C_NULL, # pMemoryBarriers 1, # bufferMemoryBarrierCount pointer([this.mHandle]), # pBufferMemoryBarriers 0, # imageMemoryBarrierCount C_NULL # pImageMemoryBarriers ) end end function handle(this::BufferBarrier) return this.mHandle end
{"hexsha": "2b45681a36f0932265cab870343a968dcce9dd45", "size": 2063, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/raii/Barriers.jl", "max_stars_repo_name": "gcmiao/Lava.jl", "max_stars_repo_head_hexsha": "fb9656944c63a9612927867cef9b0d0b33fe5f24", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-03-26T08:37:07.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-26T08:37:07.000Z", "max_issues_repo_path": "src/raii/Barriers.jl", "max_issues_repo_name": "gcmiao/Lava.jl", "max_issues_repo_head_hexsha": "fb9656944c63a9612927867cef9b0d0b33fe5f24", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/raii/Barriers.jl", "max_forks_repo_name": "gcmiao/Lava.jl", "max_forks_repo_head_hexsha": "fb9656944c63a9612927867cef9b0d0b33fe5f24", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9245283019, "max_line_length": 99, "alphanum_fraction": 0.5627726612, "num_tokens": 444}
import unittest

import numpy

from axis_speeds import AxisSpeeds


class TestAxisSpeeds(unittest.TestCase):

    def test_zero(self):
        speeds = AxisSpeeds()
        speeds.axis_rotation_speeds[0] = 0  # roll axis
        speeds.axis_rotation_speeds[1] = 0  # yaw axis
        speeds.axis_rotation_speeds[2] = 0  # pitch axis
        speeds.vertical_speed = 0  # up

        self.assertEqual(len(speeds.asArray()), 4)
        self.assertTrue((speeds.asArray() == numpy.zeros(4)).all())

        # expected result: all four motor signals are zero
        self.assertEqual(len(speeds.toMotorSignals()), 4)
        self.assertTrue((speeds.toMotorSignals() == numpy.zeros(4)).all())

    def test_roll(self):
        speeds = AxisSpeeds()
        speeds.axis_rotation_speeds[0] = 1  # roll axis
        speeds.axis_rotation_speeds[1] = 0  # yaw axis
        speeds.axis_rotation_speeds[2] = 0  # pitch axis
        speeds.vertical_speed = 0  # up

        self.assertEqual(len(speeds.asArray()), 4)
        self.assertTrue((speeds.asArray() == numpy.array([1, 0, 0, 0])).all())

        # expected result: pure roll maps to motor signals [0.25, -0.25, 0.25, -0.25]
        self.assertEqual(len(speeds.toMotorSignals()), 4)
        self.assertTrue((speeds.toMotorSignals() == numpy.array([0.25, -0.25, 0.25, -0.25])).all())

    def test_yaw(self):
        speeds = AxisSpeeds()
        speeds.axis_rotation_speeds[0] = 0  # roll axis
        speeds.axis_rotation_speeds[1] = 1  # yaw axis
        speeds.axis_rotation_speeds[2] = 0  # pitch axis
        speeds.vertical_speed = 0  # up

        self.assertEqual(len(speeds.asArray()), 4)
        self.assertTrue((speeds.asArray() == numpy.array([0, 1, 0, 0])).all())

        # expected result: pure yaw maps to motor signals [0.25, -0.25, -0.25, 0.25]
        self.assertEqual(len(speeds.toMotorSignals()), 4)
        self.assertTrue((speeds.toMotorSignals() == numpy.array([0.25, -0.25, -0.25, 0.25])).all())

    def test_pitch(self):
        speeds = AxisSpeeds()
        speeds.axis_rotation_speeds[0] = 0  # roll axis
        speeds.axis_rotation_speeds[1] = 0  # yaw axis
        speeds.axis_rotation_speeds[2] = 1  # pitch axis
        speeds.vertical_speed = 0  # up

        self.assertEqual(len(speeds.asArray()), 4)
        self.assertTrue((speeds.asArray() == numpy.array([0, 0, 1, 0])).all())

        # expected result: pure pitch maps to motor signals [0.25, 0.25, -0.25, -0.25]
        self.assertEqual(len(speeds.toMotorSignals()), 4)
        self.assertTrue((speeds.toMotorSignals() == numpy.array([0.25, 0.25, -0.25, -0.25])).all())

    def test_up(self):
        speeds = AxisSpeeds()
        speeds.axis_rotation_speeds[0] = 0  # roll axis
        speeds.axis_rotation_speeds[1] = 0  # yaw axis
        speeds.axis_rotation_speeds[2] = 0  # pitch axis
        speeds.vertical_speed = 1  # up

        self.assertEqual(len(speeds.asArray()), 4)
        self.assertTrue((speeds.asArray() == numpy.array([0, 0, 0, 1])).all())

        # expected result: pure vertical speed drives all motors equally at 0.25
        self.assertEqual(len(speeds.toMotorSignals()), 4)
        self.assertTrue((speeds.toMotorSignals() == numpy.array([0.25, 0.25, 0.25, 0.25])).all())


if __name__ == '__main__':
    unittest.main()
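# The four single-axis tests above pin down toMotorSignals() as a linear map:
# each motor signal is 0.25 * (+/- roll +/- yaw +/- pitch + vertical).  Below is
# a minimal numpy sketch that reproduces the expected vectors; it illustrates
# the implied mixing matrix and is not the actual AxisSpeeds implementation.
import numpy

# columns: roll, yaw, pitch, vertical ; rows: the four motor signals
MIXING_MATRIX_SKETCH = 0.25 * numpy.array([
    [ 1,  1,  1, 1],
    [-1, -1,  1, 1],
    [ 1, -1, -1, 1],
    [-1,  1, -1, 1],
], dtype=float)


def to_motor_signals_sketch(roll, yaw, pitch, vertical):
    """Motor signals implied by the assertions above, e.g. (1, 0, 0, 0) -> [0.25, -0.25, 0.25, -0.25]."""
    return MIXING_MATRIX_SKETCH @ numpy.array([roll, yaw, pitch, vertical], dtype=float)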
{"hexsha": "199a026bd08fb33d00f2ac119c0f466cb1b19691", "size": 3059, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/python3/axis_speeds_test.py", "max_stars_repo_name": "timdelbruegger/freecopter", "max_stars_repo_head_hexsha": "0d7023ba0fdbdfe2609b49e86bf2545fb4c8b2da", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2018-11-28T12:54:08.000Z", "max_stars_repo_stars_event_max_datetime": "2019-07-07T23:23:00.000Z", "max_issues_repo_path": "src/python3/axis_speeds_test.py", "max_issues_repo_name": "timdelbruegger/freecopter", "max_issues_repo_head_hexsha": "0d7023ba0fdbdfe2609b49e86bf2545fb4c8b2da", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/python3/axis_speeds_test.py", "max_forks_repo_name": "timdelbruegger/freecopter", "max_forks_repo_head_hexsha": "0d7023ba0fdbdfe2609b49e86bf2545fb4c8b2da", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.7215189873, "max_line_length": 99, "alphanum_fraction": 0.6243870546, "include": true, "reason": "import numpy", "num_tokens": 834}
# -*- coding: utf-8 -*- import glob import os from sklearn_evaluation.plot import confusion_matrix as plot_cm from data_load import load_data import numpy from keras import callbacks from keras.models import Sequential from keras.layers import Dense from keras.layers import Dropout from keras.constraints import maxnorm from keras.optimizers import SGD from keras.wrappers.scikit_learn import KerasClassifier from sklearn.ensemble import GradientBoostingClassifier from sklearn.cross_validation import cross_val_score from sklearn.cross_validation import StratifiedShuffleSplit, StratifiedKFold from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler from sklearn.preprocessing import Imputer from sklearn.metrics import confusion_matrix from utils import print_cm_summary import matplotlib.pyplot as plt remote = callbacks.RemoteMonitor(root='http://localhost:9000') # fix random seed for reproducibility seed = 1 numpy.random.seed(seed) # load dataset data_dir = '/home/ilya/code/mllc/data/dataset_OGLE/indexes_normalized' fnames = glob.glob(os.path.join(data_dir, '*.log'))[::-1] n_cv_iter = 5 names = ['Magnitude', 'clipped_sigma', 'meaningless_1', 'meaningless_2', 'star_ID', 'weighted_sigma', 'skew', 'kurt', 'I', 'J', 'K', 'L', 'Npts', 'MAD', 'lag1', 'RoMS', 'rCh2', 'Isgn', 'Vp2p', 'Jclp', 'Lclp', 'Jtim', 'Ltim', 'CSSD', 'Ex', 'inv_eta', 'E_A', 'S_B', 'NXS', 'IQR'] names_to_delete = ['Magnitude', 'meaningless_1', 'meaningless_2', 'star_ID', 'Npts'] X, y, feature_names = load_data(fnames, names, names_to_delete) def create_baseline(): # create model model = Sequential() model.add(Dense(25, input_dim=25, init='normal', activation='relu')) model.add(Dense(25, init='normal', activation='relu')) # model.add(Dropout(0.1)) # model.add(Dense(25, init='normal', activation='relu', # W_constraint=maxnorm(1))) # model.add(Dropout(0.1)) # model.add(Dense(25, init='normal', activation='relu', # W_constraint=maxnorm(1))) # model.add(Dropout(0.1)) # model.add(Dropout(0.2)) model.add(Dense(13, init='normal', activation='relu')) # model.add(Dropout(0.2)) # model.add(Dense(25, init='normal', activation='relu', # W_constraint=maxnorm(3))) # model.add(Dropout(0.2)) # model.add(Dense(25, init='normal', activation='relu', # W_constraint=maxnorm(3))) # model.add(Dropout(0.2)) # model.add(Dense(13, init='normal', activation='relu', # W_constraint=maxnorm(3))) # model.add(Dropout(0.2)) model.add(Dense(1, init='normal', activation='sigmoid')) # Compile model learning_rate = 0.1 decay_rate = learning_rate / epochs momentum = 0.90 sgd = SGD(lr=learning_rate, decay=decay_rate, momentum=momentum, nesterov=False) model.compile(loss='binary_crossentropy', optimizer=sgd, metrics=['accuracy']) return model epochs = 50 # epochs = 125 batch_size = 12 # estimators = list() # estimators.append(('imputer', Imputer(missing_values='NaN', strategy='median', # axis=0, verbose=2))) # estimators.append(('scaler', StandardScaler())) # estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, # nb_epoch=epochs, # batch_size=batch_size, # verbose=0))) # skf = StratifiedKFold(y, n_folds=5, shuffle=True, random_state=seed) # # pipeline = Pipeline(estimators) # # results = cross_val_score(pipeline, X, y, cv=skf, scoring='f1', n_jobs=3) # print("\n") # print(results) # print("\n") # print("Baseline: %.2f%% (%.2f%%)" % (results.mean()*100, results.std()*100)) # print("\n") # # # Check overfitting # sss = StratifiedShuffleSplit(y, n_iter=1, test_size=1. 
/ n_cv_iter, # random_state=seed) # for train_index, test_index in sss: # X_train, X_test = X[train_index], X[test_index] # y_train, y_test = y[train_index], y[test_index] # # import keras # history = keras.callbacks.History() # print("Fitting...") # X_test_ = X_test.copy() # X_train_ = X_train.copy() # for name, transform in pipeline.steps[:-1]: # print(name, transform) # transform.fit(X_train_) # X_test_ = transform.transform(X_test_) # X_train_ = transform.transform(X_train_) # pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test), # mlp__batch_size=batch_size, mlp__nb_epoch=epochs, # mlp__callbacks=[history]) # model = pipeline.named_steps['mlp'] # # y_pred = model.predict(X_test_) # y_pred[y_pred < 0.5] = 0. # y_pred[y_pred >= 0.5] = 1. # y_probs = model.predict_proba(X_test_) # cm = confusion_matrix(y_test, y_pred) # print_cm_summary(cm) # # plt.plot(history.history['acc']) # plt.plot(history.history['val_acc']) # plt.title('model accuracy') # plt.ylabel('accuracy') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper left') # plt.show() # # summarize history for loss # plt.plot(history.history['loss']) # plt.plot(history.history['val_loss']) # plt.title('model loss') # plt.ylabel('loss') # plt.xlabel('epoch') # plt.legend(['train', 'test'], loc='upper left') # plt.show() # Build several cm skf = StratifiedKFold(y, n_folds=4, shuffle=True, random_state=seed) for train_index, test_index in skf: X_train, X_test = X[train_index], X[test_index] y_train, y_test = y[train_index], y[test_index] import keras history = keras.callbacks.History() print("Fitting...") X_test_ = X_test.copy() X_train_ = X_train.copy() estimators = list() estimators.append(('imputer', Imputer(missing_values='NaN', strategy='mean', axis=0, verbose=2))) estimators.append(('scaler', StandardScaler())) estimators.append(('mlp', KerasClassifier(build_fn=create_baseline, nb_epoch=epochs, batch_size=batch_size, verbose=0))) pipeline = Pipeline(estimators) for name, transform in pipeline.steps[:-1]: print(name, transform) transform.fit(X_train_) X_test_ = transform.transform(X_test_) X_train_ = transform.transform(X_train_) pipeline.fit(X_train, y_train, mlp__validation_data=(X_test_, y_test), mlp__batch_size=batch_size, mlp__nb_epoch=epochs, mlp__callbacks=[history]) model = pipeline.named_steps['mlp'] y_pred = model.predict(X_test_) y_pred[y_pred < 0.5] = 0. y_pred[y_pred >= 0.5] = 1. y_probs = model.predict_proba(X_test_) cm = confusion_matrix(y_test, y_pred) print_cm_summary(cm) # summarize history for loss plt.plot(history.history['acc']) plt.plot(history.history['val_acc']) plt.title('model accuracy') plt.ylabel('accuracy') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show() plt.plot(history.history['loss']) plt.plot(history.history['val_loss']) plt.title('model loss') plt.ylabel('loss') plt.xlabel('epoch') plt.legend(['train', 'test'], loc='upper left') plt.show()
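# A worked note on the schedule in create_baseline() above:
# decay_rate = learning_rate / epochs = 0.1 / 50 = 0.002.  In the legacy Keras
# SGD this script uses, `decay` is (as far as we understand it) applied per
# batch update, lr_t = lr / (1 + decay * t) with t the update counter, so the
# effective rate depends on the number of updates per epoch.  The helper below
# only sketches the shape of that schedule; steps_per_epoch is a made-up value.
def sketch_effective_lr(lr=0.1, n_epochs=50, steps_per_epoch=100):
    decay = lr / n_epochs
    per_epoch_lr = []
    for epoch in range(1, n_epochs + 1):
        t = epoch * steps_per_epoch  # total updates seen so far
        per_epoch_lr.append(lr / (1.0 + decay * t))
    return per_epoch_lr

# e.g. sketch_effective_lr()[0] is ~0.083 after the first epoch and
# sketch_effective_lr()[-1] is ~0.009 after the last one.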
{"hexsha": "f168c26d7cc7f51f7f1fe5b94b57f9d2c349ec5c", "size": 7395, "ext": "py", "lang": "Python", "max_stars_repo_path": "mllc/cv_nnet.py", "max_stars_repo_name": "ipashchenko/mllc", "max_stars_repo_head_hexsha": "2bde6497432d23605b69534c6abf9119f11d3f8e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mllc/cv_nnet.py", "max_issues_repo_name": "ipashchenko/mllc", "max_issues_repo_head_hexsha": "2bde6497432d23605b69534c6abf9119f11d3f8e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mllc/cv_nnet.py", "max_forks_repo_name": "ipashchenko/mllc", "max_forks_repo_head_hexsha": "2bde6497432d23605b69534c6abf9119f11d3f8e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4285714286, "max_line_length": 80, "alphanum_fraction": 0.6431372549, "include": true, "reason": "import numpy", "num_tokens": 1894}
# !/usr/bin/env python3 # -*-coding:utf-8-*- # @file: # @brief: # @author: Changjiang Cai, [email protected], [email protected] # @version: 0.0.1 # @creation date: 29-09-2019 # @last modified: Sun 26 Jan 2020 03:06:57 AM EST # > see: http://jamesgregson.ca/bilateral-filtering-in-python.html import numpy as np import src.pfmutil as pfm import sys import math import torch import torch.nn as nn def filter_bilateral( img_in, sigma_s, sigma_v, reg_constant=1e-8 ): """Simple bilateral filtering of an input image Performs standard bilateral filtering of an input image. If padding is desired, img_in should be padded prior to calling Args: img_in (ndarray) monochrome input image sigma_s (float) spatial gaussian std. dev. sigma_v (float) value gaussian std. dev. reg_constant (float) optional regularization constant for pathalogical cases Returns: result (ndarray) output bilateral-filtered image Raises: ValueError whenever img_in is not a 2D float32 valued np.ndarray """ # check the input if not isinstance( img_in, np.ndarray ) or img_in.dtype != 'float32' or img_in.ndim != 2: raise ValueError('Expected a 2D np.ndarray with float32 elements') # make a simple Gaussian function taking the squared radius gaussian = lambda r2, sigma: (np.exp( -0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0 # define the window width to be the 3 time the spatial std. dev. to # be sure that most of the spatial kernel is actually captured win_width = int( 3*sigma_s+1 ) # initialize the results and sum of weights to very small values for # numerical stability. not strictly necessary but helpful to avoid # wild values with pathological choices of parameters wgt_sum = np.ones( img_in.shape )*reg_constant result = img_in*reg_constant # accumulate the result by circularly shifting the image across the # window in the horizontal and vertical directions. within the inner # loop, calculate the two weights and accumulate the weight sum and # the unnormalized result image for shft_x in range(-win_width,win_width+1): for shft_y in range(-win_width,win_width+1): # compute the spatial weight w = gaussian( shft_x**2+shft_y**2, sigma_s ) # shift by the offsets off = np.roll(img_in, [shft_y, shft_x], axis=[0,1] ) # compute the value weight tw = w*gaussian( (off-img_in)**2, sigma_v ) # accumulate the results result += off*tw wgt_sum += tw # normalize the result and return return result/wgt_sum def gaussian_weights(sigma): kr = math.ceil(sigma*3) ks = int(kr*2+1) k = np.zeros((ks,ks)) for i in range(0,ks): for j in range(0,ks): y = i-kr x = j-kr k[i,j] = math.exp( - (x*x+y*y)/ (2*sigma*sigma) ) return k.astype(np.float32) def filter_bilateral_with_embeding( x_in, embed_in, sigma_s = 10.0, sigma_v = 0.1, reg_constant=1e-20, hard_lambda = 1.0): """Simple bilateral filtering of an input image Performs standard bilateral filtering of an input image. If padding is desired, embed_in should be padded prior to calling Args: x_in (ndarray) input array, in size [H,W,C], could be any input, like, RGB image, gray image, or cost volume, or cost volume slice embed_in (ndarray) embeding feature, used for generatin mask, in size [H,W,F] (same to PyTorch) sigma_s (float) spatial gaussian std. dev. sigma_v (float) value gaussian std. dev. reg_constant (float) optional regularization constant for pathalogical cases hard_lambda (float) hardness of mask based on embed_in, see parameter lambda in equ (3) in W. 
Harley' paper "Segmentation-Aware Convolutional Networks Using Local Attention Masks (ICCV'17)" Returns: result (ndarray) output bilateral-filtered image Raises: ValueError whenever embed_in is not a 3D float32 valued np.ndarray """ # check the input if not isinstance( embed_in, np.ndarray ) or embed_in.dtype != 'float32' or embed_in.ndim != 3: raise ValueError('Expected embed_in a 3D np.ndarray with float32 elements') if not isinstance( x_in, np.ndarray ) or x_in.dtype != 'float32' or x_in.ndim != 3: raise ValueError('Expected x_in a 3D np.ndarray with float32 elements') H, W, F = embed_in.shape[:] _,_, C = x_in.shape[:] #print ('embed_in shape = ', embed_in.shape) #print ('x_in shape = ', x_in.shape) # make a simple Gaussian function taking the squared radius #gaussian = lambda r2, sigma, c: (np.exp( c*-0.5*r2/sigma**2 )*3).astype(int)*1.0/3.0 gaussian = lambda r2, sigma, c: np.exp( c*-0.5*r2/sigma**2) # define the window width to be the 3 time the spatial std. dev. to # be sure that most of the spatial kernel is actually captured win_width = int( 3*sigma_s+1 ) #print ('win_size = {} x {}'.format(2*win_width+1, 2*win_width + 1)) # initialize the results and sum of weights to very small values for # numerical stability. not strictly necessary but helpful to avoid # wild values with pathological choices of parameters wgt_sum = np.ones([H,W])*reg_constant #print ("showing initial wgt_sum ") #pfm.show(wgt_sum) result = x_in * reg_constant L2_dist_suqre = lambda x,y: ( np.sum((x-y)**2, axis = -1)) # accumulate the result by circularly shifting the image across the # window in the horizontal and vertical directions. within the inner # loop, calculate the two weights and accumulate the weight sum and # the unnormalized result image """ generate gaussian filter for space weight """ sw_filter = np.zeros(( 2*win_width + 1, 2*win_width + 1)) for shft_y in range(-win_width,win_width+1): for shft_x in range(-win_width,win_width+1): # compute the spatial weight y_idx = shft_y + win_width x_idx = shft_x + win_width #print (type(x_idx), (type(y_idx))) sw = gaussian(shft_x**2+shft_y**2, sigma_s, hard_lambda) # scalar sw #print ("sw_filter[%d, %d] = %f" %(y_idx, x_idx, sw)) sw_filter[y_idx, x_idx] = sw #print ("sw_filter = ", sw_filter) # verify the sw_filter #sw_filter2 = gaussian_weights(sigma_s) #print (sw_filter2) #sys.exit() rw_accum = [] for shft_y in range(-win_width,win_width+1): for shft_x in range(-win_width,win_width+1): # compute the spatial weight #sw = gaussian( shft_x**2+shft_y**2, sigma_s, hard_lambda) # scalar sw sw = sw_filter[shft_y + win_width, shft_x + win_width] print ("sw[%d][%d] = %f" %(shft_y + win_width, shft_x + win_width, sw)) # shift by the offsets off_embed = np.roll(embed_in, [shft_y, shft_x, 0], axis=[0,1,2]) off_x = np.roll(x_in, [shft_y, shft_x, 0], axis=[0,1,2]) # range weight (rw) : [H, W] rw = gaussian(L2_dist_suqre(off_embed, embed_in),sigma_v, hard_lambda) #print ('rw shape = ', rw.shape) rw_accum.append(rw) #NOTE: debugging #sw = 1.0 tw =sw*rw # in shape [H, W] # accumulate the results #NOTE: # off_x in shape [H,W,C] (note: here C could be ndisp if x_in = cost volume) result += off_x * np.expand_dims(tw, axis = -1) # [H,W,C] wgt_sum += tw if 0: rw_all = np.stack(rw_accum, axis=-1) print ('rw_all shape = ', rw_all.shape) for i in range(0, rw_all.shape[2]): shft_x = i % (2*win_width+1) - win_width shft_y = i / (2*win_width+1) - win_width print("show shift [%d][%d] " %( shft_y, shft_x)) pfm.show(rw_all[:,:,i]) # normalize the result and return return result/ 
np.expand_dims(wgt_sum, axis = -1), wgt_sum # in shape [H,W,C] def get_gaussian_filter_width_from_sigma(sigma_s): return int(3*sigma_s + 1) def get_space_gaussian_filter(sigma_s = 0.1): """ make a simple Gaussian function taking the squared radius """ gaussian = lambda r2, sigma : np.exp( -0.5*r2/sigma**2) # define the window width to be the 3 time the spatial std. dev. to # be sure that most of the spatial kernel is actually captured win_width = get_gaussian_filter_width_from_sigma(sigma_s) #print ('win_size = {} x {}'.format(2*win_width+1, 2*win_width + 1)) """ generate gaussian filter for space weight """ sw_filter = np.zeros(( 2*win_width + 1, 2*win_width + 1)) for shft_y in range(-win_width,win_width+1): for shft_x in range(-win_width,win_width+1): # compute the spatial weight y_idx = shft_y + win_width x_idx = shft_x + win_width #print (type(x_idx), (type(y_idx))) sw = gaussian(shft_x**2+shft_y**2, sigma_s) # scalar sw #print ("sw_filter[%d, %d] = %f" %(y_idx, x_idx, sw)) sw_filter[y_idx, x_idx] = sw #print ("sw_filter = ", sw_filter) return sw_filter.astype(np.float32), win_width def im2col_pytorch(x, k, d): """ args: x : input, [N,C,H,W] k : window size d : dilation return: y : in shape [N, C*(k*k), H, W] """ N, C, H, W = x.size()[:] # 3D tensor in size [N, (k*k)*C, L], here L = H*W #print ('padding = ', k//2) #print ('[???]dilation = {}'.format(d)) p = d*(k-1)//2 unfold = nn.Unfold(kernel_size= k, dilation = d, padding = p, stride = 1) #NOTE: """ PyTorch im2col (i.e., nn.Unfold) flattens each k by k block into a column which conains C*(k*k) values, where k*k is a continugous chunk, with C be the Channel dimension. """ #return unfold(x).view(N, C, k*k, H, W) return unfold(x).view(N, C*k*k, H, W) def bilateral_embeding_im2col_pytorch( x_in_np, embed_in_np, sg_filter_np, win_width, dilation, sigma_v = 0.1, device_str='cuda:0' ): """ using im2col to change the filtering to matrix multiplication Performs standard bilateral filtering of an input image. Padding is taken care by the inside function im2col_pytorch()!!! Args: x_in_np (ndarray) input array, in size [H,W,C], could be any input, like, RGB image, gray image, or cost volume, or cost volume slice embed_in_np (ndarray) embeding feature, used for generatin mask, in size [H,W,F] (same to PyTorch) sg_filter_np (ndarray) spatial gaussia filter, given sigma_s as spatial gaussian std. dev. win_width (float) spatical gaussin filter half window size; sigma_v (float) value gaussian std. dev. Returns: result (Tensor) output bilateral-filtered image Raises: ValueError whenever embed_in is not a 3D float32 valued np.ndarray """ # check the input if not isinstance( embed_in_np, np.ndarray ) or embed_in_np.dtype != 'float32' or embed_in_np.ndim != 3: raise ValueError('Expected embed_in_np a 3D np.ndarray with float32 elements') if not isinstance( x_in_np, np.ndarray ) or x_in_np.dtype != 'float32' or x_in_np.ndim != 3: raise ValueError('Expected x_in_np a 3D np.ndarray with float32 elements') N = 1 # batch size k = 2*win_width + 1 H, W, F = embed_in_np.shape[:] _,_, C = x_in_np.shape[:] #print ('[??] embed_in_np shape = ', embed_in_np.shape) #print ('[??] 
x_in_np shape = ', x_in_np.shape) assert (embed_in_np.shape[:2] == x_in_np.shape[:2]) """ you can either use cpu or gpu to run this bilateral filtering """ #device = torch.device("cuda:0") #device = torch.device("cpu:0") device = torch.device(device_str) # H x W x C ==> N x C x H x W (here N = 1 for batch size) x_in = torch.from_numpy(x_in_np.transpose(2,0,1)).to(device).view(N,C,H,W) embed_in = torch.from_numpy(embed_in_np.transpose(2,0,1)).to(device).view(N,F,H,W) sg_filter = torch.from_numpy(sg_filter_np).to(device) reg_constant = 1e-20 _, result, filter_sum = apply_batch_bilateral_filter( embed_in, sg_filter, win_width, sigma_v, device, dilation, x_in, reg_constant) return result, filter_sum def apply_batch_bilateral_filter( embed_in, sg_filter, win_width, sigma_v, device, dilation, x_in = None, reg_constant=1e-20 # regularization constant ): """ using im2col to change the filtering to element-wise matrix multiplication Performs standard bilateral filtering of an input image. Padding is taken care by the inside function im2col_pytorch()!!! Args: embed_in (Tensor) embeding for generatin mask, in size [N,F,H,W] sg_filter (Tensor) spatial gaussia filter, given sigma_s as spatial gaussian std. dev. win_width (float) spatical gaussin filter half window size; sigma_v (float) value gaussian std. dev. reg_constant (float) optional regularization constant for pathalogical cases x_in (Tensor) input array, in size [N,C,H,W], if not None, then apply the bilateral filter to it; Returns: result (Tensor) output bilateral filter and/or bilateral-filtered x_in """ k = 2*win_width + 1 N,F,H,W = embed_in.shape[:] #print ('embed_in shape = ', embed_in.shape) """ apply im2col for embedding masking """ embed_im2col = im2col_pytorch(embed_in, k, dilation) # in shape [N, (k*k*F), H, W] embed_im2col = embed_im2col.view(N, F, k*k, H, W) #print ("sigma_v ", sigma_v) """ change to in-place operation to save GPU memory """ embed_tile = embed_in.view(N,F,1,H,W) embed_im2col -= embed_tile # broadcasting rg_filter = torch.exp_(-0.5*torch.sum(embed_im2col.pow_(2), dim = 1, keepdim= True) / (sigma_v**2)) # [N,1,k*k,H,W] #print ('rg_filter :', rg_filter[0,0,:,20,20]) sg_filter = sg_filter.view(1, 1, k*k, 1, 1) # in shape (N, 1, k*k, H, W) rg_filter *= sg_filter # broadcasting if x_in is None: result = None else: """ emplement eq (4) in W. Harley' paper "Segmentation-Aware Convolutional Networks Using Local Attention Masks (ICCV'17)" """ filter_sum = torch.ones([N, 1, H, W], device = device)*reg_constant """ broadcasting """ #unrolled for element-wise multiplication: N,C, H, W = x_in.shape[:] x_in_im2col = im2col_pytorch(x_in, k, dilation) # in shape [N, C*k*k,H,W] x_in_im2col = x_in_im2col.view(N,C,k*k, H,W) # in shape [N,C,k*k,H,W] #(N, C, k*k, H, W ) * (N, 1, k*k, H, W) => (N, C, k*k, H, W) result = x_in_im2col * rg_filter result = torch.sum(result, axis = 2) # N x C x H x W filter_sum += torch.sum(rg_filter, axis=2) # N x 1 x H x W # normalize the result and return # N x C x H x W result = result / filter_sum if 0: # show print ('rg_filter shape = ', rg_filter.shape) for i in range(0, k*k): shft_x = i % (2*win_width+1) - win_width shft_y = i / (2*win_width+1) - win_width print("show shift [%d][%d] " %( shft_y, shft_x)) pfm.show(rg_filter[0,0,i,:,:].cpu().numpy()) return rg_filter, result, filter_sum
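# Minimal usage sketch of apply_batch_bilateral_filter() above on random
# tensors; the sizes, sigmas and dilation are made-up illustration values.
# The embedding drives the range term, the precomputed spatial Gaussian the
# space term, and x_in is the [N, C, H, W] volume being filtered.
def _demo_apply_batch_bilateral_filter():
    device = torch.device("cpu:0")
    N, C, F, H, W = 1, 3, 4, 16, 16
    sigma_s, sigma_v, dilation = 1.0, 0.1, 1
    sg_filter_np, win_width = get_space_gaussian_filter(sigma_s)  # 9 x 9 spatial window
    sg_filter = torch.from_numpy(sg_filter_np).to(device)
    embed_in = torch.rand(N, F, H, W, device=device)
    x_in = torch.rand(N, C, H, W, device=device)
    rg_filter, result, filter_sum = apply_batch_bilateral_filter(
        embed_in, sg_filter, win_width, sigma_v, device, dilation, x_in=x_in)
    # rg_filter: [N, 1, k*k, H, W], result: [N, C, H, W], filter_sum: [N, 1, H, W]
    return rg_filter.shape, result.shape, filter_sum.shape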
{"hexsha": "a18f9404d1c331cfb36bcc33c274d7982aac562a", "size": 15824, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/bilateral_filter.py", "max_stars_repo_name": "ccj5351/DAFStereoNets", "max_stars_repo_head_hexsha": "66b720a4abbac9097a794eacef034bab641771d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 19, "max_stars_repo_stars_event_min_datetime": "2020-10-16T02:21:13.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T10:49:47.000Z", "max_issues_repo_path": "src/bilateral_filter.py", "max_issues_repo_name": "ccj5351/DAFStereoNets", "max_issues_repo_head_hexsha": "66b720a4abbac9097a794eacef034bab641771d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/bilateral_filter.py", "max_forks_repo_name": "ccj5351/DAFStereoNets", "max_forks_repo_head_hexsha": "66b720a4abbac9097a794eacef034bab641771d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2020-10-21T06:29:34.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-07T05:02:42.000Z", "avg_line_length": 39.2655086849, "max_line_length": 200, "alphanum_fraction": 0.6148887765, "include": true, "reason": "import numpy", "num_tokens": 4467}
from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals import os from keras import backend as K from keras.datasets import mnist from keras.utils import np_utils import keras import numpy as np import tensorflow as tf os.environ["CUDA_VISIBLE_DEVICES"] = "0" # for solving some specific problems, don't care config = tf.ConfigProto() config.gpu_options.allow_growth = True sess = tf.Session(config=config) def get_mnist(): image_size = 28 num_channels = 1 num_classes = 10 (X_train, y_train), (X_test, y_test) = mnist.load_data() X_test = X_test.reshape(X_test.shape[0], image_size, image_size, num_channels) X_test = X_test.astype('float32') X_test /= 255 Y_test = np_utils.to_categorical(y_test, num_classes) X_train = X_train.reshape(X_train.shape[0], image_size, image_size, num_channels) X_train = X_train.astype('float32') / 255 Y_train = np_utils.to_categorical(y_train, num_classes) return X_train, Y_train, X_test, Y_test # Get output of one specific layer def getlayer_output(l_in, l_out, x, model): # get_k_layer_output = K.function([model.layers[l_in].input, K.learning_phase()],[model.layers[l_out].output]) get_k_layer_output = K.function([model.layers[l_in].input, 0], [model.layers[l_out].output]) return get_k_layer_output([x])[0] if __name__ == '__main__': # 1. Get adversarial 'mnist' dataset. x_adv_fpath = './results/advs_1.npy' adv_data = np.load(x_adv_fpath) print(adv_data.shape) # 2. Load a trained 'MNIST_carlini' model. dataset_name = 'MNIST' model_name = 'carlini' model_weights_fpath = "%s_%s.keras_weights.h5" % (dataset_name, model_name) model_weights_fpath = os.path.join('downloads/trained_models', model_weights_fpath) from models.carlini_models import carlini_mnist_model model = carlini_mnist_model(logits=False, input_range_type=1, pre_filter=lambda x:x) model.load_weights(model_weights_fpath) model.summary() # 3. get output of each layer and store them in npy start_layer = 0 outputs = {} for hl_idx in range(start_layer, len(model.layers)): n_neurons = model.layers[hl_idx].output_shape[-1] # neurons in every layer print('layer name: ', model.layers[hl_idx].name, '# of neurons: ', n_neurons) layer = getlayer_output(0, hl_idx, adv_data, model).copy() print('h layer', model.layers[hl_idx].name, layer.shape) file_name = './adv_output/{0}_check_values.npy'.format(model.layers[hl_idx].name) np.save(file_name, layer)
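# An alternative sketch for grabbing intermediate activations: instead of the
# K.function wrapper in getlayer_output() above, build a sub-model from the
# network input to the chosen layer and call predict() on it (predict runs in
# inference mode, matching the learning_phase=0 case above).  This assumes the
# loaded model exposes a single `model.input`; the helper name here is ours.
def getlayer_output_via_submodel(layer_idx, x, model):
    submodel = keras.models.Model(inputs=model.input,
                                  outputs=model.layers[layer_idx].output)
    return submodel.predict(x)

# e.g. layer_values = getlayer_output_via_submodel(hl_idx, adv_data, model)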
{"hexsha": "6d4e7d1cde20fdb96f73c5596f0fae125907c73a", "size": 2651, "ext": "py", "lang": "Python", "max_stars_repo_path": "EvadeML-Zoo/adv_get_output.py", "max_stars_repo_name": "RU-System-Software-and-Security/NIC", "max_stars_repo_head_hexsha": "35073b5ae1b87a465de156d780ccea3d16b5a5cb", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2020-07-02T21:43:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-08T16:36:52.000Z", "max_issues_repo_path": "EvadeML-Zoo/adv_get_output.py", "max_issues_repo_name": "RU-System-Software-and-Security/NIC", "max_issues_repo_head_hexsha": "35073b5ae1b87a465de156d780ccea3d16b5a5cb", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-09-26T01:17:11.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:44:12.000Z", "max_forks_repo_path": "EvadeML-Zoo/adv_get_output.py", "max_forks_repo_name": "RU-System-Software-and-Security/NIC", "max_forks_repo_head_hexsha": "35073b5ae1b87a465de156d780ccea3d16b5a5cb", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.1375, "max_line_length": 114, "alphanum_fraction": 0.7223689174, "include": true, "reason": "import numpy", "num_tokens": 700}
"""Image Classification Class for metrics for sail-on.""" from sail_on_client.evaluate.metrics import ProgramMetrics from evaluate.metrics import M_acc, M_num, M_ndp, M_num_stats from evaluate.metrics import M_ndp_failed_reaction from evaluate.metrics import M_accuracy_on_novel from evaluate.metrics import topk_accuracy import numpy as np from pandas import DataFrame import pandas as pd from typing import Dict class ImageClassificationMetrics(ProgramMetrics): """Image Classification program metric class.""" def __init__( self, protocol: str, image_id: int, detection: int, classification: int ) -> None: """ Initialize. Args: protocol: Name of the protocol. image_id: Column id for image detection: Column id for predicting sample wise world detection classification: Column id for predicting sample wise classes Returns: None """ super().__init__(protocol) self.image_id = image_id self.detection_id = detection self.classification_id = classification self.novel_id = -1 # Added only for fixing typechecking problems def m_acc( self, gt_novel: DataFrame, p_class: DataFrame, gt_class: DataFrame, round_size: int, asymptotic_start_round: int, ) -> Dict: """ m_acc function. Args: gt_novel: ground truth detections (Dimension: [img X detection]) p_class: class predictions (Dimension: [img X prob that sample is novel, prob of 88 known classes]) gt_class: ground truth classes (Dimension: [img X detection, classification]) round_size: size of the round asymptotic_start_round: asymptotic samples considered for computing metrics Returns: Dictionary containing top1, top3 accuracy over the test, pre and post novelty. """ class_prob = p_class.iloc[:, range(1, p_class.shape[1])].to_numpy() gt_class_idx = gt_class.to_numpy() return M_acc( gt_novel, class_prob, gt_class_idx, round_size, asymptotic_start_round ) def m_acc_round_wise( self, p_class: DataFrame, gt_class: DataFrame, round_id: int ) -> Dict: """ m_acc_round_wise function. Args: p_class: class predictions gt_class: ground truth classes round_id: round identifier Returns: Dictionary containing top1, top3 accuracy for a round """ class_prob = p_class.iloc[:, range(1, p_class.shape[1])].to_numpy() gt_class_idx = gt_class.to_numpy() top1_acc = topk_accuracy(class_prob, gt_class_idx, k=1) top3_acc = topk_accuracy(class_prob, gt_class_idx, k=3) return { f"top1_accuracy_round_{round_id}": top1_acc, f"top3_accuracy_round_{round_id}": top3_acc, } def m_num(self, p_novel: DataFrame, gt_novel: DataFrame) -> float: """ m_num function. A Program Metric where the number of samples needed for detecting novelty. The method computes the number of GT novel samples needed to predict the first true positive. Args: p_novel: detection predictions (Dimension: [img X novel]) Nx1 vector with each element corresponding to probability of novelty gt_novel: ground truth detections (Dimension: [img X detection]) Nx1 vector with each element 0 (not novel) or 1 (novel) Returns: Difference between the novelty introduction and predicting change in world. """ return M_num(p_novel, gt_novel) def m_num_stats(self, p_novel: np.ndarray, gt_novel: np.ndarray) -> Dict: """ Program Metric. Number of samples needed for detecting novelty. The method computes number of GT novel samples needed to predict the first true positive. 
Args: p_novel: detection predictions (Dimension: [img X novel]) Nx1 vector with each element corresponding to probability of novelty gt_novel: ground truth detections (Dimension: [img X detection]) Nx1 vector with each element 0 (not novel) or 1 (novel) Returns: Dictionary containing indices for novelty introduction and change in world prediction. """ return M_num_stats(p_novel, gt_novel) def m_ndp( self, p_novel: np.ndarray, gt_novel: np.ndarray, mode: str = "full_test" ) -> Dict: """ Novelty Detection Performance: Program Metric. Novelty detection performance. The method computes per-sample novelty detection performance. Args: p_novel: detection predictions (Dimension: [img X novel]) Nx1 vector with each element corresponding to probability of it being novel gt_novel: ground truth detections (Dimension: [img X detection]) Nx1 vector with each element 0 (not novel) or 1 (novel) mode: the mode to compute the test. if 'full_test' computes on all test samples, if 'post_novelty' computes from first GT novel sample. If 'pre_novelty', only calculate before first novel sample. Returns: Dictionary containing novelty detection performance over the test. """ return M_ndp(p_novel, gt_novel, mode="full_test") def m_ndp_pre(self, p_novel: np.ndarray, gt_novel: np.ndarray) -> Dict: """ Novelty Detection Performance Pre Red Light. m_ndp_pre function. See :func:`~sail-on-client.evaluation.ImageClassificationMetrics.m_ndp` with post_novelty. This computes to the first GT novel sample. It really isn't useful and is just added for completion. Should always be 0 since no possible TP. Args: p_novel: detection predictions (Dimension: [img X novel]) gt_novel: ground truth detections (Dimension: [img X detection]) Returns: Dictionary containing detection performance pre novelty. """ return M_ndp(p_novel, gt_novel, mode="pre_novelty") def m_ndp_post(self, p_novel: np.ndarray, gt_novel: np.ndarray) -> Dict: """ Novelty Detection Performance Post Red Light. m_ndp_post function. See :func:`~sail-on-client.evaluation.ImageClassificationMetrics.m_ndp` with post_novelty. This computes from the first GT novel sample Args: p_novel: detection predictions (Dimension: [img X novel]) gt_novel: ground truth detections (Dimension: [img X detection]) Returns: Dictionary containing detection performance post novelty. """ return M_ndp(p_novel, gt_novel, mode="post_novelty") def m_ndp_failed_reaction( self, p_novel: DataFrame, gt_novel: DataFrame, p_class: DataFrame, gt_class: DataFrame, mode: str = "full_test", ) -> Dict: """ Additional Metric: Novelty detection when reaction fails. Not Implemented since no gt_class info for novel samples The method computes novelty detection performance for only on samples with incorrect k-class predictions Args: p_novel: detection predictions (Dimension: [img X novel]) Nx1 vector with each element corresponding to probability of novelty gt_novel: ground truth detections (Dimension: [img X detection]) Nx1 vector with each element 0 (not novel) or 1 (novel) p_class: detection predictions (Dimension: [img X prob that sample is novel, prob of 88 known classes]) Nx(K+1) matrix with each row corresponding to K+1 class probabilities for each sample gt_class: ground truth classes (Dimension: [img X classification]) Nx1 vector with ground-truth class for each sample mode: if 'full_test' computes on all test samples, if 'post_novelty' computes from the first GT novel sample. If 'pre_novelty', than everything before novelty introduced. 
Returns: Dictionary containing TP, FP, TN, FN, top1, top3 accuracy over the test. """ class_prob = p_class.iloc[:, range(1, p_class.shape[1])].to_numpy() gt_class_idx = gt_class.to_numpy() return M_ndp_failed_reaction(p_novel, gt_novel, class_prob, gt_class_idx) def m_accuracy_on_novel( self, p_class: DataFrame, gt_class: DataFrame, gt_novel: DataFrame ) -> Dict: """ Additional Metric: Novelty robustness. The method computes top-K accuracy for only the novel samples Args: p_class: detection predictions (Dimension: [img X prob that sample is novel, prob of known classes]) Nx(K+1) matrix with each row corresponding to K+1 class probabilities for each sample gt_class: ground truth classes (Dimension: [img X classification]) Nx1 vector with ground-truth class for each sample gt_novel: ground truth detections (Dimension: N X [img, classification]) Nx1 binary vector corresponding to the ground truth novel{1}/seen{0} labels Returns: Accuracy on novely samples """ class_prob = p_class.iloc[:, range(1, p_class.shape[1])].to_numpy() gt_class_idx = gt_class.to_numpy() return M_accuracy_on_novel(class_prob, gt_class_idx, gt_novel) def m_is_cdt_and_is_early(self, gt_idx: int, ta2_idx: int, test_len: int) -> Dict: """ Is change detection and is change detection early (m_is_cdt_and_is_early) function. Args: gt_idx: Index when novelty is introduced ta2_idx: Index when change is detected test_len: Length of test Returns Dictionary containing boolean showing if change was was detected and if it was detected early """ is_cdt = (ta2_idx >= gt_idx) & (ta2_idx < test_len) is_early = ta2_idx < gt_idx return {"Is CDT": is_cdt, "Is Early": is_early} def m_nrp(self, ta2_acc: Dict, baseline_acc: Dict) -> Dict: """ m_nrp function. Args: ta2_acc: Accuracy scores for the agent baseline_acc: Accuracy scores for baseline Returns: Reaction performance for the agent """ nrp = {} nrp["M_nrp_post_top3"] = 100 * (ta2_acc["post_top3"] / baseline_acc["pre_top3"]) nrp["M_nrp_post_top1"] = 100 * (ta2_acc["post_top1"] / baseline_acc["pre_top1"]) return nrp def convert_df(old_filepath: str, new_filepath: str) -> None: """ Convert from the old df to the new df. Args: old_filepath: the filepath the the old *_single_df.csv file new_filepath: the filepath the the old *_single_df.csv file Return: None """ df = pd.read_csv(old_filepath) df["id"] = df.current_path df["detection"] = df.cls_novelty.cummax() df["classification"] = df.class_id # Retain the class num for novel classes but as negative numbers mask = df.classification == -1 df.loc[mask, "classification"] = df.loc[mask, "current_path"].map( lambda x: -int(x.split("/")[-2]) ) df[["id", "detection", "classification"]].to_csv(new_filepath, index=False)
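# Minimal numpy sketch of the top-k accuracy that m_acc/m_acc_round_wise above
# rely on.  The real implementation lives in evaluate.metrics.topk_accuracy and
# may differ in details (tie handling, input validation); this only illustrates
# the definition: a sample counts as correct when its ground-truth class index
# is among the k highest-probability classes.
def topk_accuracy_sketch(class_prob: np.ndarray, gt_class_idx: np.ndarray, k: int = 1) -> float:
    # class_prob: [N, K] class probabilities; gt_class_idx: [N] integer labels
    topk = np.argsort(-class_prob, axis=1)[:, :k]
    hits = (topk == gt_class_idx.reshape(-1, 1)).any(axis=1)
    return float(hits.mean())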
{"hexsha": "5081c12ca61bb3c8346aea398371a0aee2ecd7c6", "size": 11573, "ext": "py", "lang": "Python", "max_stars_repo_path": "sail_on_client/evaluate/image_classification.py", "max_stars_repo_name": "darpa-sail-on/sail-on-client", "max_stars_repo_head_hexsha": "1fd7c0ec359469040fd7af0c8e56fe53277d4a27", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-12T17:20:54.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-12T17:20:54.000Z", "max_issues_repo_path": "sail_on_client/evaluate/image_classification.py", "max_issues_repo_name": "darpa-sail-on/sail-on-client", "max_issues_repo_head_hexsha": "1fd7c0ec359469040fd7af0c8e56fe53277d4a27", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 92, "max_issues_repo_issues_event_min_datetime": "2021-03-08T22:32:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T03:53:01.000Z", "max_forks_repo_path": "sail_on_client/evaluate/image_classification.py", "max_forks_repo_name": "darpa-sail-on/sail-on-client", "max_forks_repo_head_hexsha": "1fd7c0ec359469040fd7af0c8e56fe53277d4a27", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 38.9663299663, "max_line_length": 115, "alphanum_fraction": 0.6419251707, "include": true, "reason": "import numpy", "num_tokens": 2582}
import torch import torch.nn as nn import torch.nn.functional as F import torch.optim as optim import numpy as np import matplotlib import matplotlib.pyplot as plt import matplotlib.animation as animation show_background = True write_movie_file = False plt.rcParams['animation.ffmpeg_path'] = 'C:\\Program Files\\ImageMagick-7.0.9-Q16\\ffmpeg.exe' # data used to train the embedding and classifier training_data = [ (['the', 'dog', 'eats'], ['DET', 'NN', 'V']), (['the', 'dog', 'eats', 'the', 'green', 'apple'], ['DET', 'NN', 'V', 'DET', 'ADJ', 'NN']), (['the', 'woman', 'reads', 'a', 'good', 'book'], ['DET', 'NN', 'V', 'DET', 'ADJ', 'NN']) ] # The words that are displayed in the graph and their colors display_words = ['the', 'a', 'woman', 'dog', 'apple', 'book', 'reads', 'eats', 'green', 'good'] display_colors = [0, 0, 1, 1, 1, 1, 2, 2, 3, 3] # defining PyTorch model architecture class SimpleTagger(nn.Module): def __init__(self, vocab_size, labels, embedding_dim = 2): super(SimpleTagger, self).__init__() self.word_embeddings = nn.Embedding(vocab_size, embedding_dim) self.linear = nn.Linear(embedding_dim, labels) def forward(self, sentence): embeddings = self.word_embeddings(sentence) return self.use_embeddings(embeddings), embeddings def use_embeddings(self, embeddings): tag_space = self.linear(embeddings) tag_scores = F.log_softmax(tag_space, dim=1) return tag_scores # helper function to convert sequences to the format PyTorch expects def prepare_sequence(seq, lookup_table): idxs = [lookup_table[w] for w in seq] return torch.tensor(idxs, dtype=torch.long) # getting the embeddings ready to be plotted def get_plot_data(): with torch.no_grad(): sentence_in = prepare_sequence(display_words, words_to_ids) tag_scores, embeddings = model(sentence_in) embeddings = embeddings.numpy() values, indices = tag_scores.T.max(0) same = [i for i, j in zip(display_colors, indices) if i == j] x = embeddings[:,0] y = embeddings[:,1] result = [x, y , display_colors] result.append(len(same)/10) return result # getting the classification layer ready to be plotted def get_plot_background_data(): with torch.no_grad(): xy = np.mgrid[-3.5:3.6:0.1, -3.5:3.6:0.1].reshape(2, -1).T tag_scores = model.use_embeddings(torch.from_numpy(xy).float()) values, indices = tag_scores.T.max(0) x = xy[:,0] y = xy[:,1] return [x, y ,indices] # creating the animation def animate(i): plt.cla() x1, y1, c1, same, x2, y2, c2 = plots[i] if show_background: ax.scatter(x2, y2, c=c2, alpha= 0.2, s=80, vmin=0, vmax=3) scatter = ax.scatter(x1, y1, c=c1, alpha=1.0, s=100,vmin=0, vmax=3) for j in range(len(x1)): ax.annotate(' '+display_words[j], (x1[j], y1[j])) plt.legend(handles=scatter.legend_elements()[0], labels=tags_to_ids, loc='upper right', fontsize=20) plt.xlim(-3.5,3.5) plt.ylim(-3.5,3.5) plt.title(f'Epoch: {i} Accuracy: {same:.0%}', fontsize=25) # computing lookup tables words_to_ids = {} for sent, tags in training_data: for word in sent: if word not in words_to_ids: words_to_ids[word] = len(words_to_ids) tags_to_ids = {} for sent, tags in training_data: for tag in tags: if tag not in tags_to_ids: tags_to_ids[tag] = len(tags_to_ids) ids_to_tags = {v: k for k, v in tags_to_ids.items()} # setting up the model model = SimpleTagger(len(words_to_ids), len(tags_to_ids)) loss_function = nn.NLLLoss() optimizer = optim.SGD(model.parameters(), lr=0.1) # the training loop plots=[] for epoch in range(200): for sentence, tags in training_data: model.zero_grad() sentence_in = prepare_sequence(sentence, words_to_ids) targets = prepare_sequence(tags, 
tags_to_ids) tag_scores, embedding = model(sentence_in) loss = loss_function(tag_scores, targets) loss.backward() optimizer.step() plot = get_plot_data() plot.extend(get_plot_background_data()) plots.append(plot) # showing the animation and writing the animation file font = {'size': 20} matplotlib.rc('font', **font) fig, ax = plt.subplots(figsize=(15, 15)) anim = animation.FuncAnimation(fig, animate, frames=200, interval=100, repeat = True) if write_movie_file: Writer = animation.writers['ffmpeg'] writer = Writer(fps=10, metadata=dict(artist='Me'), bitrate=1800) anim.save('video.mp4', writer=writer)
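# Illustrative check (not part of the original script): after the training loop
# above has run, the trained tagger can be applied to a new sentence built from
# words already in the training vocabulary, and the learned 2-D embeddings can
# be printed directly.  The example sentence below is made up.
with torch.no_grad():
    example_sentence = ['a', 'woman', 'eats', 'the', 'apple']
    example_scores, example_embeddings = model(prepare_sequence(example_sentence, words_to_ids))
    example_tags = [ids_to_tags[int(i)] for i in example_scores.argmax(dim=1)]
    print(list(zip(example_sentence, example_tags)))  # e.g. [('a', 'DET'), ('woman', 'NN'), ...]
    print(example_embeddings)  # one (x, y) embedding per word, as plotted in the animation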
{"hexsha": "9b60cb0c304a84a97bb43ed165a83902981e157e", "size": 4725, "ext": "py", "lang": "Python", "max_stars_repo_path": "embedding_visualization.py", "max_stars_repo_name": "txtData/embeddingVis", "max_stars_repo_head_hexsha": "c36619dcb1d8c0d82500c2badec63770fa222e88", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "embedding_visualization.py", "max_issues_repo_name": "txtData/embeddingVis", "max_issues_repo_head_hexsha": "c36619dcb1d8c0d82500c2badec63770fa222e88", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "embedding_visualization.py", "max_forks_repo_name": "txtData/embeddingVis", "max_forks_repo_head_hexsha": "c36619dcb1d8c0d82500c2badec63770fa222e88", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.2611940299, "max_line_length": 104, "alphanum_fraction": 0.6395767196, "include": true, "reason": "import numpy", "num_tokens": 1265}
// // Copyright (c) 2019 Vinnie Falco ([email protected]) // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // // Official repository: https://github.com/cppalliance/json // // Test that header file is self-contained. #include <boost/json/parser.hpp> #include "test_suite.hpp" BOOST_JSON_NS_BEGIN class parser_test { public: void testCtors() { // parser(parser const&) BOOST_STATIC_ASSERT( ! std::is_copy_constructible<parser>::value); // operator=(parser const&) BOOST_STATIC_ASSERT( ! std::is_copy_assignable<parser>::value); // ~parser() { // implied } // parser(storage_ptr, parse_options, unsigned char*, size_t) { unsigned char buf[256]; parser p( storage_ptr(), parse_options(), &buf[0], sizeof(buf)); } // parser() { parser p; } // parser(storage_ptr, parse_options) { parser p(storage_ptr{}, parse_options()); } // parser(storage_ptr) { parser p(storage_ptr{}); } // parser(storage_ptr, parse_options, unsigned char[]) { unsigned char buf[256]; parser p( storage_ptr(), parse_options(), buf); } #if defined(__cpp_lib_byte) // parser(storage_ptr, parse_options, std::byte*, size_t) { std::byte buf[256]; parser p( storage_ptr(), parse_options(), &buf[0], sizeof(buf)); } // parser(storage_ptr, parse_options, std::byte[]) { std::byte buf[256]; parser p( storage_ptr(), parse_options(), buf); } #endif // parser(storage_ptr, parse_options, unsigned char[], size_t) { unsigned char buf[256]; parser p( storage_ptr(), parse_options(), buf, sizeof(buf)); } #if defined(__cpp_lib_byte) // parser(storage_ptr, parse_options, std::byte[], size_t) { std::byte buf[256]; parser p( storage_ptr(), parse_options(), buf, sizeof(buf)); } #endif } void testMembers() { // reset { parser p; p.reset(); } // write_some(char const*, size_t, error_code&) { // valid json { parser p; error_code ec; auto const n = p.write_some("null", 4, ec); BOOST_TEST(! ec); BOOST_TEST(n == 4); } // valid json with trailing space { parser p; error_code ec; auto const n = p.write_some("null ", 5, ec); BOOST_TEST(! ec); BOOST_TEST(n == 5); } // valid json with invalid trailing char { parser p; error_code ec; auto const n = p.write_some("null*", 5, ec); BOOST_TEST(! ec); BOOST_TEST(n == 4); } // partial json { parser p; error_code ec; p.write_some("nul", 3, ec); BOOST_TEST(ec); } } // write_some(string_view, error_code&) { // valid json { parser p; error_code ec; auto const n = p.write_some("null", ec); BOOST_TEST(! ec); BOOST_TEST(n == 4); } // partial json { parser p; error_code ec; p.write_some("nul", ec); BOOST_TEST(ec); } } // write_some(char const*, size_t) { // valid json with trailing space { parser p; auto const n = p.write_some("null ", 5); BOOST_TEST(n == 5); } // partial json { parser p; BOOST_TEST_THROWS( p.write_some("nul", 3), system_error); } } // write_some(string_view) { // valid json with trailing space { parser p; auto const n = p.write_some("null "); BOOST_TEST(n == 5); } // partial json { parser p; BOOST_TEST_THROWS( p.write_some("nul"), system_error); } } //-------------------------------------------------- // write(char const*, size_t, error_code&) { // valid json { parser p; error_code ec; auto const n = p.write("null", 4, ec); BOOST_TEST(! ec); BOOST_TEST(n == 4); } // valid json with trailing space { parser p; error_code ec; auto const n = p.write("null ", 5, ec); BOOST_TEST(! 
ec); BOOST_TEST(n == 5); } // valid json with invalid trailing char { parser p; error_code ec; p.write("null*", 5, ec); BOOST_TEST(ec); } // partial json { parser p; error_code ec; p.write("nul", 3, ec); BOOST_TEST(ec); } } // write(string_view, error_code&) { // valid json { parser p; error_code ec; auto const n = p.write("null", ec); BOOST_TEST(! ec); BOOST_TEST(n == 4); } // partial json { parser p; error_code ec; p.write("nul", ec); BOOST_TEST(ec); } } // write(char const*, size_t) { // valid json with trailing space { parser p; auto const n = p.write("null ", 5); BOOST_TEST(n == 5); } // valid json with invalid trailing char { parser p; BOOST_TEST_THROWS( p.write("null*", 5), system_error); } // partial json { parser p; BOOST_TEST_THROWS( p.write("nul", 3), system_error); } } // write(string_view) { // valid json with trailing space { parser p; auto const n = p.write("null "); BOOST_TEST(n == 5); } // valid json with invalid trailing char { parser p; BOOST_TEST_THROWS( p.write("null*"), system_error); } // partial json { parser p; BOOST_TEST_THROWS( p.write("nul"), system_error); } } // release { // valid json { parser p; p.write("[1,2,3]"); BOOST_TEST(p.release() == value({1,2,3})); } // release with no write { parser p; BOOST_TEST_THROWS( p.release(), system_error); } // release after error { parser p; error_code ec; p.write("nul", ec); BOOST_TEST(ec); BOOST_TEST_THROWS( p.release(), system_error); } } } void run() { testCtors(); testMembers(); } }; TEST_SUITE(parser_test, "boost.json.parser"); BOOST_JSON_NS_END
{"hexsha": "883878aa6fefe65368717e3b224fb453e2eb129a", "size": 9081, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "test/parser.cpp", "max_stars_repo_name": "djarek/json", "max_stars_repo_head_hexsha": "e1bd8083ed547de26b567e7eef02a6f1142a9128", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/parser.cpp", "max_issues_repo_name": "djarek/json", "max_issues_repo_head_hexsha": "e1bd8083ed547de26b567e7eef02a6f1142a9128", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/parser.cpp", "max_forks_repo_name": "djarek/json", "max_forks_repo_head_hexsha": "e1bd8083ed547de26b567e7eef02a6f1142a9128", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.8346456693, "max_line_length": 79, "alphanum_fraction": 0.3693425834, "num_tokens": 1692}
import cv2 import numpy import openslide import os import uuid from flask import jsonify, request, render_template from flask_login import login_required, current_user from Controller import manifest_controller from Model import freehand_annotation_sqlite from Model import manifest original_data_root = 'static/data/Original_data/' freehand_annotation_data_root = "static/data/freehand_annotation_data/" def add_annotation_sever(app): @app.route('/freehand_annotation', methods=['GET', 'POST']) @login_required def freehand_annotation(): annotator_id = current_user.get_id() # annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = request.args.get('project', default="None", type=str) slide_id = request.args.get('slide_id', default=-1, type=int) if slide_id == -1: try: slide_id = current_user.slideID[annotator_id + '_' + annotation_project + "_" + "freehand"] except: temp = [] for wsi in open('export/' + annotation_project + '_slide_table.txt').readlines(): slide_id = int(wsi.split('\t')[0]) temp.append(slide_id) slide_id = int(numpy.min(temp)) current_user.slideID[annotator_id + '_' + annotation_project + "_" + "freehand"] = slide_id info = manifest_controller.get_info_by_id(slide_id) dzi_file_path = "/static/data/dzi_data/" + str(info[1]) + '/' \ + str(info[2]) + ".dzi" if os.path.exists(dzi_file_path): slide_url = dzi_file_path else: slide_url = "/dzi_online/Data/Original_data/" + str(info[1]) + '/' \ + str(info[2]) + ".dzi" return render_template('freehand_annotation.html', slide_url=slide_url, slide_id=slide_id, annotator_id=annotator_id, slide_uuid=info[1], project=annotation_project) @app.route('/freehand_annotation/_get_info') @login_required def freehand_annotation_get_info(): slide_id = request.args.get('slide_id', default=1, type=int) annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = request.args.get('project', default="None", type=str) slide_uuid = request.args.get('slide_uuid', default="", type=str) mani = manifest.Manifest() wsi = mani.get_project_by_id(slide_id) svs_file_path = original_data_root + wsi[1] + '/' + wsi[2] dimensions = openslide.OpenSlide(svs_file_path).dimensions MPP = openslide.OpenSlide(svs_file_path).properties.get("aperio.MPP") properties = dict(openslide.OpenSlide(svs_file_path).properties) w = dimensions[0] h = dimensions[1] return jsonify( img_width=w, img_height=h, um_per_px=MPP if MPP else 0.25, max_image_zoom=0, # max_image_zoom, toggle_status=0, # toggle_status properties=properties ) @app.route('/freehand_annotation/_clear_lines') @login_required def freehand_annotation_clear_lines(): slide_id = request.args.get('slide_id', default=1, type=int) annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = request.args.get('project', default="None", type=str) slide_uuid = request.args.get('slide_uuid', default="", type=str) annotation_root_folder = freehand_annotation_data_root + annotation_project + '/' + slide_uuid + '/' if not os.path.exists(annotation_root_folder): os.mkdir(annotation_root_folder) annotaion_db = annotation_root_folder + 'a' + str(annotator_id) + '.db' db = freehand_annotation_sqlite.SqliteConnector(annotaion_db) db.delete_all_lines() return jsonify( exit_code=True, ) @app.route('/freehand_annotation/_undo_lines') @login_required def freehand_annotation_undo_lines(): slide_id = request.args.get('slide_id', default=1, type=int) annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = 
request.args.get('project', default="None", type=str) slide_uuid = request.args.get('slide_uuid', default="", type=str) annotation_root_folder = freehand_annotation_data_root + annotation_project + '/' + slide_uuid + '/' if not os.path.exists(annotation_root_folder): os.mkdir(annotation_root_folder) annotaion_db = annotation_root_folder + 'a' + str(annotator_id) + '.db' db = freehand_annotation_sqlite.SqliteConnector(annotaion_db) db.del_max_branch() return jsonify( exit_code=True, ) @app.route('/freehand_annotation/_update_image') @login_required def freehand_annotation_update_image(): slide_id = request.args.get('slide_id', default=1, type=int) annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = request.args.get('project', default="None", type=str) slide_uuid = request.args.get('slide_uuid', default="", type=str) annotation_root_folder = freehand_annotation_data_root + annotation_project + '/' + slide_uuid + '/' if not os.path.exists(annotation_root_folder): os.mkdir(annotation_root_folder) annotaion_db = annotation_root_folder + 'a' + str(annotator_id) + '.db' error_message = 'N/A' request_status = 0 v1 = request.args.get('var1', 0, type=int) # Top-left x coordinate v2 = request.args.get('var2', 0, type=int) # Top-left y coordinate v3 = request.args.get('var3', 0, type=int) # Bottom-right x v4 = request.args.get('var4', 0, type=int) # Bottom-right y v5 = request.args.get('var5', 0, type=int) # Viewer width (pixel) v6 = request.args.get('var6', 0, type=int) # Viewer height (pixel) loc_tl_coor = (v1, v2) loc_viewing_size = (v3 - v1, v4 - v2) loc_viewer_size = (v5, v6) # Update URL configuration slide_url = 'static/cache/' + str(uuid.uuid1())[:13] + '.png' mask = numpy.zeros([v6, v5, 4]) db = freehand_annotation_sqlite.SqliteConnector(annotaion_db) color = [[0, 0, 255, 255], [0, 255, 0, 255], [255, 0, 0, 255], [0, 0, 0, 255]] # print(db.get_lines()) for temp in db.get_lines_in_area(int(v1), int(v2), int(v3), int(v4)): cv2.line(mask, (int((temp[1] - v1) / (v3 - v1) * v5), int((temp[2] - v2) / (v4 - v2) * v6)), (int((temp[3] - v1) / (v3 - v1) * v5), int((temp[4] - v2) / (v4 - v2) * v6)), color[temp[5] - 1], 3) write_url = slide_url cv2.imwrite(write_url, mask) return jsonify( slide_url=slide_url + '?a=' + str(uuid.uuid1()), top_left_x=loc_tl_coor[0], top_left_y=loc_tl_coor[1], viewing_size_x=loc_viewing_size[0], viewing_size_y=loc_viewing_size[1], exit_code=request_status, error_message=error_message ) @app.route('/freehand_annotation/_record', methods=['GET', 'POST']) @login_required def freehand_annotation_record(): slide_id = request.args.get('slide_id', default=1, type=int) annotator_id = request.args.get('annotator_id', default=1, type=int) annotation_project = request.args.get('project', default="None", type=str) slide_uuid = request.args.get('slide_uuid', default="", type=str) annotation_root_folder = freehand_annotation_data_root + annotation_project + '/' + slide_uuid + '/' if not os.path.exists(annotation_root_folder): os.mkdir(annotation_root_folder) annotaion_db = annotation_root_folder + 'a' + str(annotator_id) + '.db' req_form = request.form num_of_points = int(len(req_form) / 3) pt_list_att = ('[x]', '[y]', '[grading]') db = freehand_annotation_sqlite.SqliteConnector(annotaion_db) # In one round, the grading and pslv could be updated only once. 
data = [] branch_id = -1 branch_id_find = [] for i in range(num_of_points): if int(req_form[str(i) + '[grading]']) > 0: if i == 0: continue if (int(req_form[str(i - 1) + '[x]']) == int(req_form[str(i) + '[x]'])) \ and (int(req_form[str(i - 1) + '[y]']) == int(req_form[str(i) + '[y]'])): continue data.append(tuple([int(req_form[str(i - 1) + '[x]']), int(req_form[str(i - 1) + '[y]']), int(req_form[str(i) + '[x]']), int(req_form[str(i) + '[y]']), int(req_form[str(i) + '[grading]'])])) elif int(req_form[str(i) + '[grading]']) == 0: db.delete_points(int(req_form[str(i) + '[x]']), int(req_form[str(i) + '[y]'])) elif int(req_form[str(i) + '[grading]']) == -1: db.delete_lines(int(req_form[str(i) + '[x]']), int(req_form[str(i) + '[y]'])) elif int(req_form[str(i) + '[grading]']) == -2: branch_id_find = db.find_lines(int(req_form[str(i) + '[x]']), int(req_form[str(i) + '[y]'])) if len(data): branch_id = db.incert_lines(data) return jsonify( pt_false_x=[], pt_false_y=[], region_id=0, branch_id=branch_id, branch_id_find=branch_id_find )
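# Stand-alone sketch of the coordinate mapping used in
# freehand_annotation_update_image(): a point at slide coordinates (x, y)
# inside the requested viewport [v1, v3] x [v2, v4] is drawn at viewer pixel
# ((x - v1) / (v3 - v1) * viewer_w, (y - v2) / (v4 - v2) * viewer_h).
# The helper name and the numbers below are illustrative only.
def _slide_to_viewer_px(x, y, v1, v2, v3, v4, viewer_w, viewer_h):
    px = int((x - v1) / (v3 - v1) * viewer_w)
    py = int((y - v2) / (v4 - v2) * viewer_h)
    return px, py


if __name__ == "__main__":
    # A 2000x1000 slide region rendered in a 500x250 viewer: the point
    # (1500, 750) sits three quarters of the way across and down, so it maps
    # to pixel (375, 187).
    print(_slide_to_viewer_px(1500, 750, 0, 0, 2000, 1000, 500, 250))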
{"hexsha": "6dd69f2f087ed1bf1620b58d768b5b881a1d2ae7", "size": 9674, "ext": "py", "lang": "Python", "max_stars_repo_path": "Server/freehand_annotation_server.py", "max_stars_repo_name": "llmay98/WSI-online-analysis-framework", "max_stars_repo_head_hexsha": "1f2ee084b7ed87d22f9fbf041a6101c600104bfc", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-01-07T04:22:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-06T11:10:15.000Z", "max_issues_repo_path": "Server/freehand_annotation_server.py", "max_issues_repo_name": "llmay98/WSI-online-analysis-framework", "max_issues_repo_head_hexsha": "1f2ee084b7ed87d22f9fbf041a6101c600104bfc", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2021-06-02T13:19:46.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-12T00:11:11.000Z", "max_forks_repo_path": "Server/freehand_annotation_server.py", "max_forks_repo_name": "llmay98/WSI-online-analysis-framework", "max_forks_repo_head_hexsha": "1f2ee084b7ed87d22f9fbf041a6101c600104bfc", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-03T04:48:00.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-03T04:48:00.000Z", "avg_line_length": 43.3811659193, "max_line_length": 108, "alphanum_fraction": 0.6018193095, "include": true, "reason": "import numpy", "num_tokens": 2408}
from thyme.trajectory import Trajectory import numpy as np def replicate(trj, expand: tuple = (1, 1, 1)): """Replicate each frame of trj into an expand[0] x expand[1] x expand[2] supercell. Positions of every periodic image are generated by shifting the original positions by integer multiples of the three cell vectors, forces and species are copied for each image, the cell vectors are scaled by the corresponding expansion factors, and the replicated frames are collected into a new Trajectory. """ new_trj = Trajectory() for idf in range(len(trj)): _trj = trj.extract_frames([idf]) cell = _trj.cells[0] new_positions = [] new_cells = [] new_forces = [] symbols = [] for i in range(expand[0]): for j in range(expand[1]): for k in range(expand[2]): new_positions.append( _trj.positions[0] + cell[0] * i + cell[1] * j + cell[2] * k ) new_forces.append(np.copy(_trj.forces[0])) symbols.append(np.copy(_trj.species)) new_positions = np.vstack(new_positions) new_forces = np.vstack(new_forces) symbols = np.hstack(symbols) cell = np.copy(_trj.cells[0]) for idir in range(3): for jdir in range(3): cell[idir, jdir] = cell[idir, jdir] * expand[idir] _trj.positions = new_positions.reshape([1, -1, 3]) _trj.forces = new_forces.reshape([1, -1, 3]) _trj.cells = cell.reshape([1, 3, 3]) _trj.natom = expand[0] * expand[1] * expand[2] * _trj.natom new_trj.add_trj(_trj) return new_trj
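# Stand-alone numpy illustration of the tiling that replicate() performs per
# frame: each (i, j, k) image is shifted by i*cell[0] + j*cell[1] + k*cell[2]
# and the cell vectors are scaled by the expansion factors.  The 2-atom cubic
# cell below is made up and does not use the thyme Trajectory class.
if __name__ == "__main__":
    import numpy as np

    cell = np.eye(3) * 3.0  # toy 3x3 cell matrix, one lattice vector per row
    positions = np.array([[0.0, 0.0, 0.0], [1.5, 1.5, 1.5]])
    expand = (2, 1, 1)
    images = [
        positions + cell[0] * i + cell[1] * j + cell[2] * k
        for i in range(expand[0])
        for j in range(expand[1])
        for k in range(expand[2])
    ]
    new_positions = np.vstack(images)  # 4 atoms for a 2x1x1 supercell
    new_cell = cell * np.array(expand)[:, None]  # row idir scaled by expand[idir]
    print(new_positions.shape, new_cell[0])  # (4, 3) [6. 0. 0.]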
{"hexsha": "3f87b43a7d3de53d780a1c8aca209e5b24dabdc4", "size": 1291, "ext": "py", "lang": "Python", "max_stars_repo_path": "thyme/routines/manipulate.py", "max_stars_repo_name": "nw13slx/thyme", "max_stars_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "thyme/routines/manipulate.py", "max_issues_repo_name": "nw13slx/thyme", "max_issues_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "thyme/routines/manipulate.py", "max_forks_repo_name": "nw13slx/thyme", "max_forks_repo_head_hexsha": "b2a16aa1e6b0701adcfd2bd146f85b5c46b35254", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.8611111111, "max_line_length": 83, "alphanum_fraction": 0.5321456235, "include": true, "reason": "import numpy", "num_tokens": 364}
import errno import json import logging import matplotlib.pyplot as plt import numpy as np import os import pandas as pd from pathlib import Path import re import statsmodels.stats.multitest as multitest from biclust_comp import logging_utils def threshold_float_to_str(threshold_float): threshold_str = f"_thresh_{np.format_float_scientific(threshold_float, trim='-', exp_digits=1)}" return threshold_str def threshold_str_to_float(threshold_str): threshold_float = float(re.match(r'_thresh_(\de[-+]\d)', threshold_str)[1]) return threshold_float def ensure_dir(file_path): directory = os.path.dirname(file_path) if not os.path.exists(directory): os.makedirs(directory) def read_list_from_file(*filename_parts, strip_quotes=False): full_name = os.path.join(*filename_parts) if not os.path.exists(full_name): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), full_name) with open(full_name, 'r') as f: lines = [l.strip() for l in f.readlines()] if strip_quotes: lines = [l.strip('"\'') for l in lines] return lines def read_int_from_file(*filename_parts): full_name = os.path.join(*filename_parts) if not os.path.exists(full_name): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), full_name) with open(full_name, 'r') as f: contents = f.read() return int(contents.strip()) def read_np(*filename_parts): full_name = os.path.join(*filename_parts) if not os.path.exists(full_name): raise FileNotFoundError( errno.ENOENT, os.strerror(errno.ENOENT), full_name) return np.loadtxt(full_name, ndmin=2) def save_np(filename, obj): np.savetxt(filename, obj, delimiter='\t') def read_df(folder, filename): full_name = os.path.join(folder, filename) return read_matrix_tsv(full_name) def read_matrix_tsv(filename): return pd.read_csv(filename, delim_whitespace=True, index_col=None, header=None) def combine_inds_tissues(A, Z): """ Given a matrices corresponding to individuals (A) and to tissues (Z), return a matrix where rows now correspond to samples i.e. all tissues from all individuals. They will be arranged with all individuals from tissue 1, then all from tissue 2 etc. 
Args: A: (N, K) np.ndarray with each row corresponding to one individual Z: (T, K) np.ndarray with each row corresponding to one tissue Returns: Matrix with same dtype as A, with each column having entries (A_1k * Z_1k, A_2k * Z_1k, ..., A_Nk * Z_1k, A_1k * Z_2k, ..., A_Nk * Z_Tk) """ N, K = A.shape T, K_ = Z.shape assert K == K_, f"A and Z not compatible shapes to combine: A has shape"\ " {A.shape}, Z has shape {Z.shape} - need same number of columns" X = np.zeros((N*T, K), dtype=A.dtype) for k in range(K): logging.debug(f"Factor {k}, Z then A") logging.debug(Z[:10,k]) logging.debug(A[:10,k]) X[:,k] = np.kron(Z[:,k], A[:,k]) return X def remove_empty_factors(X_raw, B_raw, threshold=0): assert type(X_raw) == np.ndarray assert type(B_raw) == np.ndarray assert X_raw.shape[1] == B_raw.shape[1] factors_zero_X = np.all(abs(X_raw) <= threshold, axis=0) factors_zero_B = np.all(abs(B_raw) <= threshold, axis=0) # If a factor has all of X[:,k] equal to 0, or all of B[:,k] equal to 0, # this is an empty factor and we should exclude it factors_zero = (factors_zero_X) | (factors_zero_B) X_nonempty = X_raw[:, ~ factors_zero] B_nonempty = B_raw[:, ~ factors_zero] return X_nonempty, B_nonempty def binarise_matrix(matrix): return (matrix != 0).astype(int) def remove_below_threshold(matrix, threshold): copied = matrix.copy() copied[abs(matrix) < threshold] = 0 return copied def remove_below_threshold_norm_1(matrix, threshold): copied = matrix.copy() norms = np.linalg.norm(copied, axis=0) copied[abs(matrix / norms) < threshold] = 0 return copied def scale_factors_same_norm(X, B): X_norms = np.linalg.norm(X, axis=0) B_norms = np.linalg.norm(B, axis=0) logging.debug(X_norms) logging.debug(B_norms) combined_norms = X_norms * B_norms X_scaled = X * np.sqrt(combined_norms) / X_norms B_scaled = B * np.sqrt(combined_norms) / B_norms return X_scaled, B_scaled def scale_factors_by_X(X, B, desired_X_norm=1): X_norms = np.linalg.norm(X, axis=0) print(X_norms) X_scaled = X / X_norms * desired_X_norm B_scaled = B * X_norms / desired_X_norm return X_scaled, B_scaled def save_plot(filename): # Create directory if it doesn't alreayd exist directory = Path(filename).parent Path(directory).mkdir(parents=True, exist_ok=True) # Save figure plt.savefig(filename, bbox_inches='tight', dpi=300) logging.info(f"Saved figure to {filename}") def transform_dict_to_count_df(item_dict): """Given a dictionary, where each element of the dictionary is a list, return the data frame with columns all the elements that occur in any of the lists (union of the lists) and rows the keys of the dictionary. Each entry is the number of times that the item occurs in the list given by key. Input: {'A': ['1', '2'], 'B': ['2', '3', '5']} Output: 1 2 3 5 A 1 1 0 0 B 0 1 1 1 """ # Assemble df where each column corresponds to one list and there are # max(len(list)) rows df = pd.concat([pd.Series(v, name=k).astype(str) for k, v in item_dict.items()], axis=1) # Transform so that we have the desired form return pd.get_dummies(df.stack()).sum(level=1) def correct_multiple_testing(pval_df, alpha=0.0001): """Given a data frame filled with p-values, perform the FDR Benjamini-Yekutieli multiple testing adjustment to the p-values. 
Constrain the FDR to alpha Returns: reject dataFrame of bools indicating which null hypotheses should be rejected corrected_pvals dataFrame of p-values corrected for multiple testing (unaffected by alpha) """ flattened = pval_df.values.flatten() reject_flat, corrected_flat, _, _ = multitest.multipletests(flattened, alpha=alpha, method='fdr_by') reject = pd.DataFrame(reject_flat.reshape(pval_df.shape)) reject.index = pval_df.index reject.columns = pval_df.columns corrected = pd.DataFrame(corrected_flat.reshape(pval_df.shape), dtype ='float64') corrected.index = pval_df.index corrected.columns = pval_df.columns return reject, corrected def scaled_to_thresholded(X_scaled, B_scaled, threshold): X_thr_raw = remove_below_threshold_norm_1(X_scaled, threshold) B_thr_raw = remove_below_threshold_norm_1(B_scaled, threshold) X_thr, B_thr = remove_empty_factors(X_thr_raw, B_thr_raw) K = X_thr.shape[1] return K, X_thr, B_thr def scaled_to_thresholded_binary(X_scaled, B_scaled, threshold): K, X_thr, B_thr = scaled_to_thresholded(X_scaled, B_scaled, threshold) X_thr_bin = binarise_matrix(X_thr) B_thr_bin = binarise_matrix(B_thr) return K, X_thr_bin, B_thr_bin def read_result_threshold_binary(run_folder, threshold): K, X_thr, B_thr = read_result_threshold(run_folder, threshold) X_bin = binarise_matrix(X_thr) B_bin = binarise_matrix(B_thr) return K, X_bin, B_bin def read_result_scaled(run_folder): read_fail = False try: X_raw = read_np(run_folder, 'X.txt') B_raw = read_np(run_folder, 'B.txt') except ValueError: # Plaid just makes an empty file when K=0, # so we have to look out for being unable to read the matrix logging.info(f"Value error detected when reading files from {run_folder}") read_fail = True if not read_fail: X, B = remove_empty_factors(X_raw, B_raw) if run_folder.split('/')[1] == 'Plaid': X_scaled_raw, B_scaled_raw = X, B np.testing.assert_array_equal(binarise_matrix(X_scaled_raw), X, "Expecting Plaid to have binary matrix as X.txt") else: X_scaled_raw, B_scaled_raw = scale_factors_same_norm(X, B) X_scaled, B_scaled = remove_empty_factors(X_scaled_raw, B_scaled_raw) K = X_scaled.shape[1] else: K = 0 X_scaled = np.zeros((0,0)) B_scaled = np.zeros((0,0)) return K, X_scaled, B_scaled def read_result_threshold(run_folder, threshold): if run_folder.split('/')[1] == 'Plaid' and threshold != 0: K_thr = 0 X_thr = np.zeros((0,0)) B_thr = np.zeros((0,0)) else: K_scaled, X_scaled, B_scaled = read_result_scaled(run_folder) K_thr, X_thr, B_thr = scaled_to_thresholded(X_scaled, B_scaled, threshold) return K_thr, X_thr, B_thr def save_reconstructed_X(A_file, Z_file, X_outfile): A = read_np(A_file) Z = read_np(Z_file) X = combine_inds_tissues(A=A, Z=Z) save_np(X_outfile, X) def save_json(dictionary, jsonfile): with open(jsonfile, 'w') as f: json.dump(dictionary, f, indent=2) def load_json(jsonfile): with open(jsonfile, 'r') as f: dictionary = json.load(f) return dictionary def extract_dataset_from_mdr(mdr): split_mdr = mdr.split('/') dataset = "/".join(split_mdr[1:-1]) return dataset def read_error_df(maybe_df_file, *args, **kwargs): """Accept either a dataframe or a filename""" if isinstance(maybe_df_file, pd.DataFrame): df = maybe_df_file else: assert isinstance(maybe_df_file, str), \ f"Expecting either dataframe or filename, instead got {type(maybe_df_file)}" df = pd.read_csv(maybe_df_file, *args, **kwargs) return df
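# Tiny worked example of combine_inds_tissues() above: with N=2 individuals,
# T=2 tissues and K=1 factor, column k of the result is the Kronecker product
# of Z[:, k] with A[:, k], i.e. all individuals for tissue 1 followed by all
# individuals for tissue 2.  The numbers are arbitrary toy values.
if __name__ == "__main__":
    import numpy as np

    A_toy = np.array([[2.0], [3.0]])     # individuals x factors
    Z_toy = np.array([[10.0], [100.0]])  # tissues x factors
    X_toy = combine_inds_tissues(A_toy, Z_toy)
    print(X_toy.ravel())  # [ 20.  30. 200. 300.]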
{"hexsha": "834cdf08e9af136ad69e22189c4cef5b26e19ccd", "size": 9916, "ext": "py", "lang": "Python", "max_stars_repo_path": "biclust_comp/utils.py", "max_stars_repo_name": "nichollskc/biclust_comp", "max_stars_repo_head_hexsha": "1b01d44e8a4ab4f40cf015e60bd39afb8616f89d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "biclust_comp/utils.py", "max_issues_repo_name": "nichollskc/biclust_comp", "max_issues_repo_head_hexsha": "1b01d44e8a4ab4f40cf015e60bd39afb8616f89d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "biclust_comp/utils.py", "max_forks_repo_name": "nichollskc/biclust_comp", "max_forks_repo_head_hexsha": "1b01d44e8a4ab4f40cf015e60bd39afb8616f89d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.3797468354, "max_line_length": 104, "alphanum_fraction": 0.6689189189, "include": true, "reason": "import numpy,import statsmodels", "num_tokens": 2580}
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests for the key functions in pruner library.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function # import g3 from absl.testing import parameterized import numpy as np import tensorflow as tf # TODO(b/139939526): move to public API. from tensorflow.python.keras import keras_parameterized from tensorflow_model_optimization.python.core.keras import compat from tensorflow_model_optimization.python.core.sparsity_tf2 import pruner from tensorflow_model_optimization.python.core.sparsity.keras import pruning_schedule from tensorflow_model_optimization.python.core.sparsity.keras import pruning_utils test = tf.test class PruningTest(test.TestCase, parameterized.TestCase): def testPrunableModel(self): pass if __name__ == "__main__": test.main()
{"hexsha": "d30e349a6c7ba3faeea5ec9871bbb722cee13bca", "size": 1516, "ext": "py", "lang": "Python", "max_stars_repo_path": "tensorflow_model_optimization/python/core/sparsity_tf2/prunable_model_test.py", "max_stars_repo_name": "xwinxu/model-optimization", "max_stars_repo_head_hexsha": "c3d78bff440e1cb9f237d9b31f1047b36775db21", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-04-11T07:33:47.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-11T07:33:47.000Z", "max_issues_repo_path": "tensorflow_model_optimization/python/core/sparsity_tf2/prunable_model_test.py", "max_issues_repo_name": "xwinxu/model-optimization", "max_issues_repo_head_hexsha": "c3d78bff440e1cb9f237d9b31f1047b36775db21", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tensorflow_model_optimization/python/core/sparsity_tf2/prunable_model_test.py", "max_forks_repo_name": "xwinxu/model-optimization", "max_forks_repo_head_hexsha": "c3d78bff440e1cb9f237d9b31f1047b36775db21", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6888888889, "max_line_length": 85, "alphanum_fraction": 0.7585751979, "include": true, "reason": "import numpy", "num_tokens": 307}
import numpy as np import copy import operator import matplotlib.pyplot as plt from .sampler import Sampler class QRBM: def __init__(self, n_visible, n_hidden, err_function='mse', use_tqdm=True, tqdm=None): if err_function not in {'mse', 'cosine'}: raise ValueError('err_function should be either \'mse\' or \'cosine\'') self._use_tqdm = use_tqdm self._tqdm = None if use_tqdm or tqdm is not None: from tqdm import tqdm self._tqdm = tqdm self.n_visible = n_visible self.n_hidden = n_hidden self.w = (np.random.rand(self.n_visible, self.n_hidden) * 2 - 1) * 0.01 self.visible_bias = (np.random.rand(self.n_visible) * 2 - 1) * 0.01 self.hidden_bias = (np.random.rand(self.n_hidden) * 2 - 1) * 0.01 self.sampler = Sampler() self.n_epoch = 0 def get_weights(self): return self.w, \ self.visible_bias, \ self.hidden_bias def set_weights(self, w, visible_bias, hidden_bias): self.w = w self.visible_bias = visible_bias self.hidden_bias = hidden_bias def set_qubo(self): visible_bias = self.visible_bias hidden_bias = self.hidden_bias w = self.w Q = {} for i in range(self.n_visible): Q[(i, i)] = -1 * visible_bias[i] for i in range(self.n_hidden): Q[(i + self.n_visible, i + self.n_visible)] = -1 * hidden_bias[i] for i in range(self.n_visible): for j in range(self.n_hidden): Q[(i, self.n_visible + j)] = -1 * w[i][j] self.Q = Q def sample_clamped_qubo(self, position_, value_, num_samps=100): """ Force samples to have specific values in positions """ assert(len(position_) == len(value_)) if not hasattr(self, 'Q'): self.set_qubo() n_to_clamp = len(position_) position = copy.deepcopy(position_) value = copy.deepcopy(value_) Q = copy.deepcopy(self.Q) clamp_strength = 50 for to_clamp in range(n_to_clamp): this_idx = position[to_clamp] this_value = value[to_clamp] if this_value == 0: # x=0 # Update bias to force sample[this_idx] to be 0 Q[(this_idx, this_idx)] = clamp_strength # Update w given sample[this_idx] = 0 for (x, y) in Q: if (x == this_idx or y == this_idx) and x != y: Q[(x, y)] *= 0 else: # x == 1: # Update bias to force sample[this_idx] to be 1 Q[(this_idx, this_idx)] = -1 * clamp_strength # Update w given sample[this_idx] = 1 for (x, y) in Q: if (x == this_idx or y == this_idx) and x != y: pass # no need to change self.samples, self.energies, self.num_occurrences = self.sampler.sample_qubo(Q, num_samps = num_samps) self.energies /= np.max(np.abs(self.energies)) return self.samples def get_Z(self): Z = np.sum(np.exp(-1 * self.energies)) self.Z = Z return Z def sample_qubo(self, num_samps=100): if not hasattr(self, 'Q'): self.set_qubo() self.samples, self.energies, self.num_occurrences = self.sampler.sample_qubo(self.Q, num_samps=num_samps) self.energies /= np.max(np.abs(self.energies)) self.get_Z() return self.samples def prediction_sample_to_probability_dict(self, ranges_to_predict, sample_idx_to_clamp, sample_value_to_clamp): self.get_Z() predictions_dicts = [] for range_to_predict_start, range_to_predict_end in ranges_to_predict: if len(self.samples) == 0: predictions_dicts.append(None) continue predictions_dict = {} sample_idx = 0 for sample, energy in zip(self.samples, self.energies): values_in_sample_which_should_be_clamped = sample[sample_idx_to_clamp] if type(values_in_sample_which_should_be_clamped) != list: values_in_sample_which_should_be_clamped = values_in_sample_which_should_be_clamped.tolist() if type(sample_value_to_clamp) != list: sample_value_to_clamp = sample_value_to_clamp.tolist() if values_in_sample_which_should_be_clamped != sample_value_to_clamp: continue y = 
sample[range_to_predict_start:range_to_predict_end] y_str = ','.join(str(y)) if y_str in predictions_dict: predictions_dict[y_str] += np.exp(-1.0 * energy) / self.Z else: predictions_dict[y_str] = np.exp(-1.0 * energy) / self.Z sample_idx += 1 predictions_dicts.append(predictions_dict) return predictions_dicts def predictions_dicts_to_max_values(self, predictions_dicts, digit_num=1): if digit_num != 1: raise ValueError('digit_num != 1 not supported yet') predictions = [] for predictions_dict in predictions_dicts: if predictions_dict == None or len(predictions_dict) == 0: prediction = np.random.randint(2) else: prediction_with_max_probability_tuple = max(predictions_dict.iteritems(), key=operator.itemgetter(1)) # print "max_y_probability_tuple: ",max_y_probability_tuple prediction = prediction_with_max_probability_tuple[0].split(',')[1:-1] # [1:-1] cuts square brackets prediction = [int(y) for y in prediction][0] # only 1 digit predictions.append(prediction) return predictions def predict_from_qubo(self, test_data, num_samps=100): predictions = [] size_x = len(test_data[0]) # size_y = self.n_visible - size_x for x in test_data: clamped_idx = range(size_x) clamped_values = x # samples = self.sample_qubo(num_samps=num_samps) ranges_to_predict = [(size_x, self.n_visible)] predictions_dicts = self.prediction_sample_to_probability_dict(ranges_to_predict, clamped_idx, clamped_values) predictions_dict = predictions_dicts[0] if predictions_dict == None or len(predictions_dict) == 0: predictions.append(None) continue max_y_probability_tuple = max(predictions_dict.iteritems(), key=operator.itemgetter(1)) max_y = max_y_probability_tuple[0].split(',')[1:-1] # [1:-1] cuts square brackets max_y = [int(y) for y in max_y] max_y_probability = max_y_probability_tuple[1] predictions.append((max_y, max_y_probability)) return predictions def train(self, training_data, len_x=1, len_y=1, epochs=1, lr=1, decay=0.01, num_samps=100, momentum=0.8, epochs_to_test=1, num_sams_for_test=100, print_training_data=False): """ maximize the product of probabilities assigned to some training set V optimize the weight vector single-step contrastive divergence (CD-1): 1. Take a training sample v, compute the probabilities of the hidden units and sample a hidden activation vector h from this probability distribution. 2. Compute the outer product of v and h and call this the positive gradient. 3. From h, sample a reconstruction v' of the visible units, then resample the hidden activations h' from this. (Gibbs sampling step) 4. Compute the outer product of v' and h' and call this the negative gradient. 5. Let the update to the weight matrix W be the positive gradient minus the negative gradient, times some learning rate 6. 
Update the biases a and b analogously: a=epsilon (v-v'), b=epsilon (h-h') https://en.wikipedia.org/wiki/Restricted_Boltzmann_machine """ if len_y != 0: random_idx_to_predict = np.random.randint(len(training_data)) print "Initial state, predicting data ", [ training_data[random_idx_to_predict][:len_x]], "result to be:", self.predict_from_qubo( [training_data[random_idx_to_predict][:len_x]], num_samps=num_sams_for_test) print "--------------------------" for epoch in self.tqdm(range(epochs)): # single step # 1 # 1.1 Take a training sample v random_selected_training_data_idx = epoch % len(training_data) v = training_data[random_selected_training_data_idx] if print_training_data: print "epoch #", self.n_epoch, " training data:", v # # 1.2 compute the probabilities of the hidden units clamped_idx = range(self.n_visible) clamped_values = v ranges_to_predict = [] for i in range(self.n_hidden): range_to_predict_start = self.n_visible + i range_to_predict_end = self.n_visible + i + 1 ranges_to_predict.append((range_to_predict_start, range_to_predict_end)) # _ = self.sample_clamped_qubo(clamped_idx, clamped_values, num_samps=1) _ = self.sample_qubo(num_samps=num_samps) predictions_dicts = self.prediction_sample_to_probability_dict(ranges_to_predict, clamped_idx, clamped_values) h = self.predictions_dicts_to_max_values(predictions_dicts) # 2 Compute the outer product of v and h and call this the positive gradient. pos_grad = np.outer(v, h) pos_w_grad = np.zeros(shape=[self.n_visible + self.n_hidden, self.n_visible + self.n_hidden]) # print "self.n_visible:",self.n_visible # print "self.n_hidden:",self.n_hidden # # print "left:",pos_J_grad[0:self.n_visible,self.n_visible:self.n_visible+self.n_hidden] # print "pos_grad:",pos_grad pos_w_grad[0:self.n_visible, self.n_visible:self.n_visible + self.n_hidden] = pos_grad # 3 # 3.1 From h, sample a reconstruction v' of the visible units, clamped_values = h clamped_idx = range(self.n_visible, self.n_visible + self.n_hidden) ranges_to_predict = [] for i in range(self.n_visible): ranges_to_predict.append((range_to_predict_start, range_to_predict_end)) # _ = self.sample_clamped_qubo(clamped_idx, clamped_values, num_samps=1) _ = self.sample_qubo(num_samps=num_samps) predictions_dicts = self.prediction_sample_to_probability_dict(ranges_to_predict, clamped_idx, clamped_values) v_dash = self.predictions_dicts_to_max_values(predictions_dicts) # print "v: ",v," v_dash: ",v_dash # 3.2 then resample the hidden activations h' from this. (Gibbs sampling step) clamped_values = v_dash clamped_idx = range(self.n_visible) ranges_to_predict = [] for i in range(self.n_hidden): ranges_to_predict.append((range_to_predict_start, range_to_predict_end)) # _ = self.sample_clamped_qubo(clamped_idx, clamped_values, num_samps=1) _ = self.sample_qubo(num_samps=num_samps) predictions_dicts = self.prediction_sample_to_probability_dict(ranges_to_predict, clamped_idx, clamped_values) h_dash = self.predictions_dicts_to_max_values(predictions_dicts) # 4 Compute the outer product of v' and h' and call this the negative gradient. 
neg_grad = np.outer(v_dash, h_dash) neg_w_grad = np.zeros(shape=[self.n_visible + self.n_hidden, self.n_visible + self.n_hidden]) neg_w_grad[0:self.n_visible, self.n_visible:self.n_visible + self.n_hidden] = neg_grad # 5 Let the update to the weight matrix W be the positive gradient minus the negative gradient, # times some learning rate def update_with_momentum(to_update, old_delta, new_delta, momentum): to_update += old_delta * momentum + (1 - momentum) * new_delta delta_w = lr * (pos_w_grad - neg_w_grad) if not hasattr(self, "delta_w"): self.delta_w = delta_w for i in range(self.n_visible): for j in range(self.n_hidden): # update_with_momentum(self.w[i,j], self.delta_w[(i,j)], delta_w[(i,j)], momentum) self.w[i, j] += delta_w[(i, j)] # 6 Update the biases a and b analogously: a=epsilon (v-v'), b=epsilon (h-h') delta_visible_bias = lr * (np.array(v) - np.array(v_dash)) # if not hasattr(self, "delta_visible_bias"): # self.delta_visible_bias = delta_visible_bias # update_with_momentum(self.visible_bias, self.delta_visible_bias, delta_visible_bias, momentum) self.visible_bias += delta_visible_bias delta_hidden_bias = lr * (np.array(h) - np.array(h_dash)) # if not hasattr(self, "delta_hidden_bias"): # self.delta_hidden_bias = delta_hidden_bias # update_with_momentum(self.hidden_bias, self.delta_hidden_bias, delta_hidden_bias, momentum) self.hidden_bias += delta_hidden_bias # save hyperparameters lr *= (1 - decay) self.lr = lr self.delta_w = delta_w self.delta_visible_bias = delta_visible_bias self.delta_hidden_bias = delta_hidden_bias self.set_qubo() if epoch == 0 and len_y == 0 and self.image_height != None: plt.figure() plt.axis('off') plt.title("Image reconstructed before training", y=1.03) plt.imshow(np.array(v_dash).reshape(self.image_height, -1)) # show training progress if epoch % epochs_to_test == 0 and len_y != 0: random_idx_to_predict = np.random.randint(len(training_data)) print "predicting data ", [ training_data[random_idx_to_predict][:len_x]], "result to be:", self.predict_from_qubo( [training_data[random_idx_to_predict][:len_x]], num_samps=num_sams_for_test) print "--------------------------" if epochs_to_test != -1 and epoch % epochs_to_test == 0 and len_y == 0: self.sample_qubo() sample = self.samples[0] if self.image_height != None: plt.figure() plt.axis('off') plt.title("Image reconstructed after training " + str(epoch + 1) + " epochs", y=1.03) plt.imshow(sample[:len_x].reshape(self.image_height, -1)) else: print "sampling data to be", sample[:self.n_visible] original = training_data[0] samples = self.samples[0][:self.n_visible] if type(original) != list: original = original.tolist() if type(samples) != list: samples = samples.tolist() if original == samples: print "Stopped training early because the model can reconstruct the inputs" break self.n_epoch += 1 return
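# Stand-alone sketch of the QUBO that set_qubo() builds for a tiny RBM with
# 2 visible and 1 hidden unit: negated biases go on the diagonal and negated
# weights fill the visible-hidden off-diagonal block.  The bias and weight
# values below are arbitrary and do not come from a trained model; the class
# itself is not instantiated here because it needs a Sampler backend.
if __name__ == "__main__":
    n_visible, n_hidden = 2, 1
    visible_bias = [0.1, -0.2]
    hidden_bias = [0.3]
    w = [[0.5], [-0.4]]  # shape (n_visible, n_hidden)

    Q = {}
    for i in range(n_visible):
        Q[(i, i)] = -visible_bias[i]
    for j in range(n_hidden):
        Q[(n_visible + j, n_visible + j)] = -hidden_bias[j]
    for i in range(n_visible):
        for j in range(n_hidden):
            Q[(i, n_visible + j)] = -w[i][j]

    print(Q)  # {(0, 0): -0.1, (1, 1): 0.2, (2, 2): -0.3, (0, 2): -0.5, (1, 2): 0.4}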
{"hexsha": "eeb744ad8f54b94f587d6e1f499bfaff1a384c04", "size": 16070, "ext": "py", "lang": "Python", "max_stars_repo_path": "qrbm/qrbm.py", "max_stars_repo_name": "mercari/CFQIRBM", "max_stars_repo_head_hexsha": "fc785a0a62726ee47812aa26692984ee2c8f3d41", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 17, "max_stars_repo_stars_event_min_datetime": "2018-09-21T19:51:32.000Z", "max_stars_repo_stars_event_max_datetime": "2021-08-23T09:06:40.000Z", "max_issues_repo_path": "qrbm/qrbm.py", "max_issues_repo_name": "mercari/CFQIRBM", "max_issues_repo_head_hexsha": "fc785a0a62726ee47812aa26692984ee2c8f3d41", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "qrbm/qrbm.py", "max_forks_repo_name": "mercari/CFQIRBM", "max_forks_repo_head_hexsha": "fc785a0a62726ee47812aa26692984ee2c8f3d41", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-04-03T06:19:24.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-22T13:49:18.000Z", "avg_line_length": 44.2699724518, "max_line_length": 117, "alphanum_fraction": 0.5836963286, "include": true, "reason": "import numpy", "num_tokens": 3508}
import auxiliary.processthread as process import pandas as pd import matplotlib matplotlib.use('Agg') import matplotlib.pyplot as plt import seaborn as sns import numpy as np import json import keywords import argparse options = keywords.getarguments() plt.rcParams['svg.fonttype'] = 'none' plt.rcParams['pdf.fonttype'] = 42 sns.set(style="whitegrid", rc={'figure.figsize':(10,3)}) def select(timestep, time): timestep = json.loads(timestep) out = [] min_unmut = 1 for hostId, host in timestep.items(): host_ndna = 0 host_unmut = 0 for mitoId, mito in host['subcells'].items(): host_ndna += mito['n DNA'] if mito['n DNA'] > 0: host_unmut += mito['unmut'] *mito['n DNA'] dctout = {"time":time} dctout.update(host['evolvables']) # print(dctout) out.append(dctout) return out # picklefname='./ndna_at_death.pickle' # dfs = process.get(force=False, folder='../current', reverse=False, selector=select, start=0, verbose=True, sortbykeywordix=[("fission_rate", -1), ("fusion_rate", -1), ("deprecation_rate", -1), ("division_volume", -1)]) dfs = process.get(force=options.f, picklefname=keywords.nfile("evolvables.pickle"),runs=keywords.getruns(),folder=keywords.getfoldername(), selector=select, sortbykeywordix=keywords.getkeywordix(), sortbylineix=keywords.getlineix(),verbose=options.v, load = options.l) dfs2 = process.get(picklefname="../210923_mutlifetime/processing/evolvables.pickle",runs=keywords.getruns(),force=options.f, folder='../210923_mutlifetime/', selector=select, verbose=options.v, sortbykeywordix=keywords.getkeywordix(), sortbylineix=keywords.getlineix(), load=options.l) # dfs2 = process.get(force=options.f, picklefname=keywords.nfile("evolvablesrev.pickle"),runs=keywords.getruns(),folder="../210927_mutrange2", selector=select, reverse=True, stop=500, sortbykeywordix=keywords.getkeywordix(), sortbylineix=keywords.getlineix(),verbose=options.v) # print(dfs2.columns) for path in dfs2: dfs[path] = dfs2[path] # print(df) # facecolor=plt.gcf().get_facecolor() alldf =[] for path in dfs: df = pd.DataFrame(dfs[path]['data']) # df['fusion_rate'] = df['fusion_rate'] *1000000 if df['time'].iloc[-1] < 150000: if options.v: print(path, "not making plot due to early end") continue if options.v: print(path, " making plot") for key, val in dfs[path].items(): if key != 'data': # title += (key + " = " + str(val) + '\n') df[key] = val # df = df[(df['time'] %10000 == 0)] if options.v: print( df['fusion_rate']) df['path'] = path alldf.append(df) alldf= pd.concat(alldf, ignore_index=True) order = sorted(alldf["NDNA_MUT_LIFETIME"].unique()) g = sns.FacetGrid(data=alldf, row="NDNA_MUT_LIFETIME", hue="path", sharex=True,sharey=True, palette='colorblind', row_order=order, aspect=3, height=4) g.map(sns.scatterplot, 'time', 'fusion_rate', linewidth=0, s=0.1, alpha=0.7, rasterized=True) # plt.savefig(keywords.nfile("evolvables/fusionperiod"+ path[-4:] +".svg"), dpi=600) # fig, ax = plt.subplots() plt.savefig(keywords.nfile("allfusionperiod.pdf"), dpi=600) plt.savefig(keywords.nfile("allfusionperiod.png"), dpi=600) # g.map(fixed_boxplot, "rep", "value", order=reporder) # fig, ax = plt.subplots() # # g = sns.lineplot(data=df, x='time', y='value', hue="variable", palette="colorblind", ax=ax) # g = sns.scatterplot(data=df, x='time', y='fusion_rate', linewidth=0, palette="colorblind", ax=ax, s=1, alpha=1) # plt.legend(loc="upper left") # title = "fusion rate through time\n" # for key, val in dfs[path].items(): # if key != 'data': # title += (key + " = " + str(val) + '\n') # g.set_title(title, y=0.85) # fig.tight_layout() # # 
ax.title.set_y(0.5) # fig.subplots_adjust(top=0.75) # plt.savefig(keywords.nfile("evolvables/fusionperiod"+ path[-4:] +".svg"), dpi=600) # plt.savefig(keywords.nfile("evolvables/fusionperiod"+ path[-4:] +".png"), dpi=600) # plt.close()
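# Toy illustration of what select() above extracts from a single time step.
# The JSON string is a minimal made-up host/subcell record containing only the
# fields select() actually reads; real logs contain many more entries.
if __name__ == "__main__":
    toy_timestep = json.dumps({
        "1": {
            "evolvables": {"fission_rate": 0.01, "fusion_rate": 0.002},
            "subcells": {"7": {"n DNA": 3, "unmut": 0.9}},
        }
    })
    print(select(toy_timestep, time=1000))
    # -> [{'time': 1000, 'fission_rate': 0.01, 'fusion_rate': 0.002}]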
{"hexsha": "b37b0ac4bd08852ff6972c0e57e57220f16a7fba", "size": 4140, "ext": "py", "lang": "Python", "max_stars_repo_path": "hpc/processing/plot_fusion_over_time.py", "max_stars_repo_name": "samvonderdunk/artistoo", "max_stars_repo_head_hexsha": "890fad12ee42d7fed78ea28d144ad3b2be321da5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "hpc/processing/plot_fusion_over_time.py", "max_issues_repo_name": "samvonderdunk/artistoo", "max_issues_repo_head_hexsha": "890fad12ee42d7fed78ea28d144ad3b2be321da5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hpc/processing/plot_fusion_over_time.py", "max_forks_repo_name": "samvonderdunk/artistoo", "max_forks_repo_head_hexsha": "890fad12ee42d7fed78ea28d144ad3b2be321da5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5882352941, "max_line_length": 288, "alphanum_fraction": 0.6528985507, "include": true, "reason": "import numpy", "num_tokens": 1195}
[STATEMENT] lemma locLength_is_length_loc: "P\<^bsub>wf\<^esub>,P\<^bsub>\<Phi>\<^esub> \<turnstile> (None, h, (stk, loc, C, M, pc) # frs') \<surd> \<Longrightarrow> locLength P C M pc = length loc" [PROOF STATE] proof (prove) goal (1 subgoal): 1. P\<^bsub>wf\<^esub>,P\<^bsub>\<Phi>\<^esub> |- (None, h, (stk, loc, C, M, pc) # frs') [ok] \<Longrightarrow> locLength P C M pc = length loc [PROOF STEP] by (auto dest!: list_all2_lengthD simp: correct_state_def)
{"llama_tokens": 195, "file": "Slicing_JinjaVM_SemanticsWF", "length": 1}
@testset "unsqueeze" begin x = randn(2, 3, 2) @test @inferred(unsqueeze(x; dims=1)) == reshape(x, 1, 2, 3, 2) @test @inferred(unsqueeze(x; dims=2)) == reshape(x, 2, 1, 3, 2) @test @inferred(unsqueeze(x; dims=3)) == reshape(x, 2, 3, 1, 2) @test @inferred(unsqueeze(x; dims=4)) == reshape(x, 2, 3, 2, 1) @test unsqueeze(dims=2)(x) == unsqueeze(x, dims=2) end @testset "stack and unstack" begin x = randn(3,3) stacked = stack([x, x], dims=2) @test size(stacked) == (3,2,3) stacked_array=[ 8 9 3 5; 9 6 6 9; 9 1 7 2; 7 4 10 6 ] unstacked_array=[[8, 9, 9, 7], [9, 6, 1, 4], [3, 6, 7, 10], [5, 9, 2, 6]] @test unstack(stacked_array, dims=2) == unstacked_array @test stack(unstacked_array, dims=2) == stacked_array @test stack(unstack(stacked_array, dims=1), dims=1) == stacked_array end @testset "batch and unbatch" begin stacked_array=[ 8 9 3 5 9 6 6 9 9 1 7 2 7 4 10 6 ] unstacked_array=[[8, 9, 9, 7], [9, 6, 1, 4], [3, 6, 7, 10], [5, 9, 2, 6]] @test unbatch(stacked_array) == unstacked_array @test batch(unstacked_array) == stacked_array # no-op for vector of non-arrays @test batch([1,2,3]) == [1,2,3] @test unbatch([1,2,3]) == [1,2,3] # generic iterable @test batch(ones(2) for i=1:3) == ones(2, 3) @test unbatch(ones(2, 3)) == [ones(2) for i=1:3] @testset "tuple" begin @test batch([(1,2), (3,4)]) == ([1,3], [2,4]) @test batch([([1,2], [3,4]), ([5,6],[7,8])]) == ([1 5 2 6], [3 7 4 8]) end @testset "named tuple" begin @test batch([(a=1,b=2), (a=3,b=4)]) == (a=[1,3], b=[2,4]) @test batch([(a=1,b=2), (b=4,a=3)]) == (a=[1,3], b=[2,4]) nt = [(a=[1,2], b=[3,4]), (a=[5,6],b=[7,8])] @test batch(nt) == (a = [1 5 2 6], b = [3 7 4 8]) end @testset "dict" begin @test batch([Dict(:a=>1,:b=>2), Dict(:a=>3,:b=>4)]) == Dict(:a=>[1,3], :b=>[2,4]) @test batch([Dict(:a=>1,:b=>2), Dict(:b=>4,:a=>3)]) == Dict(:a=>[1,3], :b=>[2,4]) d = [Dict(:a=>[1,2], :b=>[3,4]), Dict(:a=>[5,6],:b=>[7,8])] @test batch(d) == Dict(:a => [1 5 2 6], :b => [3 7 4 8]) end end @testset "flatten" begin x = randn(Float32, 10, 10, 3, 2) @test size(flatten(x)) == (300, 2) end @testset "normalise" begin x = randn(Float32, 3, 2, 2) @test normalise(x) == normalise(x; dims=3) end @testset "chunk" begin cs = chunk(collect(1:10), 3) @test length(cs) == 3 @test cs[1] == [1, 2, 3, 4] @test cs[2] == [5, 6, 7, 8] @test cs[3] == [9, 10] end @testset "group_counts" begin d = group_counts(['a','b','b']) @test d == Dict('a' => 1, 'b' => 2) end @testset "rpad" begin @test rpad([1, 2], 4, 0) == [1, 2, 0, 0] @test rpad([1, 2, 3], 2, 0) == [1,2,3] end @testset "batchseq" begin bs = batchseq([[1, 2, 3], [4, 5]], 0) @test bs[1] == [1, 4] @test bs[2] == [2, 5] @test bs[3] == [3, 0] bs = batchseq([[1, 2, 3], [4, 5]], -1) @test bs[1] == [1, 4] @test bs[2] == [2, 5] @test bs[3] == [3, -1] end
{"hexsha": "298b9fc3406f7c8ec517797006ca03748dfd18ab", "size": 3502, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/utils.jl", "max_stars_repo_name": "dmolina/MLUtils.jl", "max_stars_repo_head_hexsha": "4ad22745693c05363a03172fbbf14725e606020d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/utils.jl", "max_issues_repo_name": "dmolina/MLUtils.jl", "max_issues_repo_head_hexsha": "4ad22745693c05363a03172fbbf14725e606020d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/utils.jl", "max_forks_repo_name": "dmolina/MLUtils.jl", "max_forks_repo_head_hexsha": "4ad22745693c05363a03172fbbf14725e606020d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4259259259, "max_line_length": 89, "alphanum_fraction": 0.434608795, "num_tokens": 1440}
using Plots
using LinearAlgebra
include("mess.jl")
using SparseArrays
using Arpack
# node1 node2 node3
xy = [ 0 0; 0 1]; # definition of the nodes as coordinates
nodes = size(xy)[2]; # number of nodes
# element: start node, end node
numel = [1 2 ]; # definition of the elements as lines given by their end nodes
N = size(numel)[1]; # number of elements
dofs = 3*nodes;
# definition of the element elasticity
a = 20e-2;
E = 220e9.*ones(N,1);
I = a^4 .*ones(N,1)./12;
A = a^2 .*ones(N,1) # square cross-section, a x a
rho = 7.8e3;
# definition of the constrained nodes
BC = zeros(dofs,1)
#ignored = 1:3:dofs;# ignore rotation about the axis = ignore that degree of freedom
BC[1:3] .= 1;
#BC[ignored].=1;
fixedofs = findall(x->x==1,BC[:])
freedofs = findall(x->x==0,BC[:])
# boundary conditions
U = zeros(dofs+3*N,1);
F = zeros(dofs+3*N,1);
F[[4,5]] .= 1e3;
# find all the indices that will access K
alldofs = zeros(N,6);
# definition of the global stiffness matrix
global K = sparse([],[],Float64[],dofs+3*N,dofs+3*N);
global M = sparse([],[],Float64[],dofs+3*N,dofs+3*N);
ignored = []
#global K = zeros(dofs,dofs);
# start main loop to calculate stiffness matrix
for i=1:N
    i1 = numel[i,1]
    i2 = numel[i,2]
    # calculate directions
    x1 = xy[1,i1];
    x2 = xy[1,i2];
    dx = x2-x1;
    y1 = xy[2,i1];
    y2 = xy[2,i2];
    dy = y2-y1;
    L = sqrt(dx^2+dy^2);
    nx = dx/L;
    ny = dy/L;
    # direction matrix
    c = nx;
    s = ny;
    Bbend = [-s c 0 0 0 0 0 0 0;
        0 0 1 0 0 0 0 0 0;
        0 0 0 -s c 0 0 0 0;
        0 0 0 0 0 1 0 0 0;
        0 0 0 0 0 0 -s c 0;
        0 0 0 0 0 0 0 0 1];
    Btens = [c s 0 0 0 0;
        0 0 c s 0 0;
        0 0 0 0 c s];
    kbend = E[i]*I[i]/L^3;
    # insert tensile stiffness
    ktens = E[i]*A[i]/L
    global Ketens = (ktens/3)*[ 7 -8 1;
        -8 16 -8;
        1 -8 7];
    Ketens = Btens'*Ketens*Btens
    global Metens = (rho*A[i]*L/15)*[2 1 -1;
        1 8 1;
        -1 1 2];
    Metens = Btens'*Metens*Btens;
    Kebend = (kbend/35/(12+13*L)^2).*[4*(25776+55560*L+30695*L^2) 2*L*(80592+173240*L+95025*L^2) -512*(927+2040*L+1085*L^2) 512*L*(537+1175*L+665*L^2) -4*(56016+127800*L+75985*L^2) 2*L*(18384+42680*L+25585*L^2);
        0 L^2*4*(118080+25400*L+13835*L^2) -256*L*(492+1040*L+555*L^2) 128*L^2*(354+770*L+435*L^2) -2*L*(17616+40120*L+23985*L^2) 2*L^2*(2832+6600*L+3995*L^2);
        0 0 8192*(126+270*L+145*L^2) 1024*L*(12+40*L+25*L^2) -512*(1044+2280*L+1235*L^2) 256*L*(516+1120+605*L^2);
        0 0 0 1024*L^2*(183+405*L+230*L^2) -512*L*(561+1255*L+715*L^2) 128*L^2*(378+850*L+485*L^2);
        0 0 0 0 4*(189648+419640*L+234065*L^2) -2*L*(84432+186040*L+103025*L^2);
        0 0 0 0 0 L^2*4*(12192+26680*L+14635*L^2)];
    Kebend = Symmetric(Kebend);
    Kebend = Bbend'*Kebend*Bbend;
    Mebend = rho*A[i]*(L/(12+13*L)^2)*[(337248+750276*L+417883*L^2)/15015 L*(57552+132764*L+76767*L^2)/45045 8*(18408+41602*L+23423*L^2)/30030 -2*L*(31848+60578*L+27535*L^2)/45045 (81168+181304*L+10061*L^2)/30030 -L*(48720+104128*L+54703*L^2)/45045;
        0 L^2*(4236+10167*L+6131*L^2)/45045 8*L*(5772+13573*L+7907*L^2)/45045 -2*L^2*(764+1381*L+556*L^2)/15015 L*(58704+139712*L+81263*L^2)/180180 -L^2*(1840+4208*L+2343*L^2)/60060;
        0 0 1024*(858+1846+993*L^2)/15015 128*L*(156+556*L+415*L^2)/45045 8*(15912+32706*L+16783*L^2)/15015 -8*L*(4524+9125*L+4587*L^2)/45045;
        0 0 0 128*L^2*(471+1029*L+568*L^2)/45045 2*L*(42456+98782*L+56745*L^2)/45045 -2*L^2*(1076+2515*L+1441*L^2)/15015;
        0 0 0 0 (316032+673868*L+359463*L^2)/15015 -L*(49440+103456*L+54197*L^2)/45045;
        0 0 0 0 0 L^2*(300+615*L+316*L^2)/4095];
    Mebend = Symmetric(Mebend);
    Mebend = Bbend'*Mebend*Bbend;
    tensiledofs = [1 2 4 5 7 8];
    Ke = Kebend .+0;
    Ke[tensiledofs,tensiledofs] = Ke[tensiledofs,tensiledofs][1,:,1,:] + Ketens;
    #println(size(Ke),"\n")
    ending = dofs+3*N
    edofs = [3*i1-2 3*i1-1 ending-3*i+3 ending-3*i+2 ending-3*i+1 3*i1+3 3*i2-2 3*i2-1 3*i2]
push!(ignored,ending-3*i+3, ending-3*i+2, ending-3*i+1) K[edofs,edofs] = K[edofs,edofs][1,:,1,:] + Ke; #println(K,"\n") Me = Mebend .+0; Me[tensiledofs,tensiledofs] = Me[tensiledofs,tensiledofs][1,:,1,:] + Metens; M[edofs,edofs] = M[edofs,edofs][1,:,1,:] + Me; end Kfinal = K[vcat(freedofs,ignored),vcat(freedofs,ignored)] Mfinal = M[vcat(freedofs,ignored),vcat(freedofs,ignored)] rhs = F rhs = rhs[vcat(freedofs,ignored)] rhs = rhs - K[vcat(freedofs,ignored),fixedofs]*U[fixedofs] Ufree = Kfinal\rhs U[freedofs] = Ufree[freedofs] #Ufr = reshape(U,(3,nodes)) show(U[:]) #println(varinfo()) f = inv(Matrix(Mfinal))*Matrix(Kfinal) omega = sqrt.(abs.(eigvals(f)))/(2*pi) s = eigvecs(f) plot(omega) #calculate
{"hexsha": "e7b2e839b96a77da807584f1103de70b679e3cbe", "size": 5197, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "axnai julia/test.jl", "max_stars_repo_name": "billyon/first", "max_stars_repo_head_hexsha": "fba58ba3832d0b65fd6f3f3a7537d109d1e32eff", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-13T19:12:34.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-13T19:12:34.000Z", "max_issues_repo_path": "axnai julia/test.jl", "max_issues_repo_name": "billyon/first", "max_issues_repo_head_hexsha": "fba58ba3832d0b65fd6f3f3a7537d109d1e32eff", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "axnai julia/test.jl", "max_forks_repo_name": "billyon/first", "max_forks_repo_head_hexsha": "fba58ba3832d0b65fd6f3f3a7537d109d1e32eff", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 42.5983606557, "max_line_length": 257, "alphanum_fraction": 0.5320377141, "num_tokens": 2202}
import numpy as np from multiprocessing import sharedctypes, Pool, process import itertools import sys from netCDF4 import Dataset import os import argparse import timeit sys.path.append('/home/robinmid/repos/harvey_scaling/') sys.path.append('/home/robin/repos/harvey_scaling/') from analysis.utils import WORLD_REGIONS, SECTOR_GROUPS, get_axes_and_dirs, pickle_data, \ get_regions_dict, get_sectors_dict, make_figure_dir, get_index_list from analysis.dataformat import AggrData global data_array_shared global mask_array_shared process.current_process()._config['tempdir'] = '/p/tmp/robinmid/temp' class DataReader: def __init__(self, _vars, _ensemble_meta, _experiment_series_dir, _region_groups: dict, _sector_groups: dict, _lambda_axis, _duration_axis, _time_frame, _num_cpus=0): self.vars = _vars self.ensemble_meta = _ensemble_meta self.experiment_series_dir = _experiment_series_dir self.region_groups = _region_groups self.sector_groups = _sector_groups self.lambda_axis = _lambda_axis self.duration_axis = _duration_axis self.time_frame = _time_frame self.num_cpus = _num_cpus self.region_indices = {} self.sector_indices = {} data_array = np.zeros((len(self.vars), len(self.region_groups), len(self.sector_groups), len(self.lambda_axis), len(self.duration_axis), self.time_frame)) mask_array = data_array != 0 data_array_ctypes = np.ctypeslib.as_ctypes(data_array) mask_array_ctypes = np.ctypeslib.as_ctypes(mask_array) global data_array_shared global mask_array_shared data_array_shared = sharedctypes.RawArray(data_array_ctypes._type_, data_array_ctypes) mask_array_shared = sharedctypes.RawArray(mask_array_ctypes._type_, mask_array_ctypes) def go(self): l_d_tuples = itertools.product(self.lambda_axis, self.duration_axis) if self.num_cpus == 0: self.num_cpus = os.cpu_count() print("Starting {} workers.".format(self.num_cpus)) p = Pool(self.num_cpus) p.starmap(self.read_file, l_d_tuples) result_data = np.ctypeslib.as_array(data_array_shared) result_mask = np.ctypeslib.as_array(mask_array_shared) result = np.ma.array(result_data, mask=result_mask) return AggrData(result, np.array(self.vars), self.region_groups, self.sector_groups, np.array(self.lambda_axis), np.array(self.duration_axis)) def read_file(self, lambda_value_, duration_value_): l_ = np.where(self.lambda_axis == lambda_value_)[0] d_ = np.where(self.duration_axis == duration_value_)[0] if (lambda_value_, duration_value_) not in ensemble_meta['scaled_scenarios'].keys(): print("##### Key ({}, {}) not found. Skipping.".format(lambda_value_, duration_value_)) return iteration_path = os.path.join(self.experiment_series_dir, self.ensemble_meta['scaled_scenarios'][(lambda_value_, duration_value_)][ 'iter_name'] + "/output.nc") print("##### Reading from file {}".format(iteration_path)) try: dataset = Dataset(iteration_path, "r", format="NETCDF4") except Exception as e: print("File {} could not be loaded.".format(iteration_path)) print(e) return iteration_sim_duration = len(dataset.variables['time']) iteration_time_frame = self.time_frame if iteration_sim_duration < iteration_time_frame: print("Attention! Dataset {} contains less time steps ({}) than requested ({}). 
Continue with " "dataset length instead, mask remaining length.".format(iteration_path, iteration_sim_duration, self.time_frame)) iteration_time_frame = iteration_sim_duration data_tmp = np.ctypeslib.as_array(data_array_shared) mask_tmp = np.ctypeslib.as_array(mask_array_shared) for v, var in enumerate(self.vars): _data = dataset["/agents/" + var.replace('/', '')][:].data _data = np.ma.array(_data, mask=np.isnan(_data), fill_value=0) for r, region_group_name in enumerate(self.region_groups): if region_group_name in self.region_indices.keys(): iter_region_indices = self.region_indices[region_group_name] else: iter_region_indices = get_index_list(self.region_groups[region_group_name], dataset["/region"][:]) self.region_indices[region_group_name] = iter_region_indices for s, sector_group_name in enumerate(self.sector_groups): if sector_group_name in self.sector_indices.keys(): iter_sector_indices = self.sector_indices[sector_group_name] else: iter_sector_indices = get_index_list(self.sector_groups[sector_group_name], dataset["/sector"][:]) self.sector_indices[sector_group_name] = iter_sector_indices iter_series = np.sum( np.sum(_data[:iteration_time_frame, iter_sector_indices, :][:, :, iter_region_indices], axis=1), axis=1) data_tmp[v, r, s, l_, d_, :iteration_time_frame] = iter_series mask_tmp[v, r, s, l_, d_, iteration_time_frame:] = data_tmp[v, r, s, l_, d_, iteration_time_frame:] == 0 dataset.close() del _data del data_tmp del mask_tmp if __name__ == '__main__': parser = argparse.ArgumentParser(description='') parser.add_argument('ensemble_dir', type=str, help='') parser.add_argument('--num_cpus', type=int, default=0, help='') parser.add_argument('--variable', type=str, default='all', help='Variable to read. If not specified, standard ' 'set of variables will beused.') parser.add_argument('--region', type=str, default='all', help='Region to read. If not specified, standard set' 'of regions will beused.') parser.add_argument('--sector', type=str, default='all', help='Sector to read. 
If not specified, standard set' 'of sectors will beused.') parser.add_argument('--output', type=str, default='', help='Output file name.') parser.add_argument('--max_timesteps', type=int, default=0, help='') pars = vars(parser.parse_args()) starting_time = timeit.default_timer() experiment_series_dir = pars['ensemble_dir'] make_figure_dir(experiment_series_dir) lambda_axis, duration_axis, ensemble_meta = get_axes_and_dirs(experiment_series_dir) num_cpus = pars['num_cpus'] time_frame = max( [simulation_meta['sim_duration'] for simulation_meta in list(ensemble_meta['scaled_scenarios'].values())]) if pars['max_timesteps'] > 0: time_frame = min(time_frame, pars['max_timesteps']) if pars['variable'] == 'all': variables = ['consumption', 'consumption_price', 'consumption_value', 'demand', 'demand_price', 'demand_value', 'production', 'production_price', 'production_value', 'storage', 'storage_price', 'storage_value', 'total_loss', 'total_loss_price', 'total_loss_value', 'total_value_loss', 'offer_price', 'expected_offer_price', 'expected_production', 'expected_production_price', 'expected_production_value', 'communicated_possible_production', 'communicated_possible_production_price', 'communicated_possible_production_value', 'unit_production_costs', 'total_production_costs', 'total_revenue', 'direct_loss', 'direct_loss_price', 'direct_loss_value', 'forcing', 'incoming_demand', 'incoming_demand_price', 'incoming_demand_value', 'production_capacity', 'desired_production_capacity', 'possible_production_capacity'] elif pars['variable'] == 'set_harvey': variables = ['production', 'production_value', 'incoming_demand', 'incoming_demand_value', 'consumption', 'consumption_value', 'forcing'] else: variables = pars['variable'].split('+') if pars['region'] == 'all': regions = set(list(WORLD_REGIONS.keys()) + WORLD_REGIONS['WORLD']) elif pars['region'] == 'set_1': regions = set( list(WORLD_REGIONS.keys()) + ['DEU', 'USA', 'CHN', 'CAN', 'ESP', 'FRA', 'GBR', 'IND', 'JPN', 'MEX', 'POL', 'HUN']) elif pars['region'] == 'set_sandy': regions = set(list(WORLD_REGIONS['USA']) + ['DEU', 'CHN', 'EUR', 'WORLD', 'ROW', 'USA_REST_SANDY']) else: regions = pars['region'].split('+') if pars['sector'] == 'all': sectors = [i for i in SECTOR_GROUPS['ALLSECTORS']] + ['PRIVSECTORS'] else: sectors = pars['sector'].split('+') region_args = get_regions_dict(regions) sector_args = get_sectors_dict(sectors) data_reader = DataReader(variables, ensemble_meta, experiment_series_dir, region_args, sector_args, lambda_axis, duration_axis, time_frame, num_cpus) data = data_reader.go() if pars['output'] == '': filename = "{}vars_{}regns_{}sects_{}lambda_{}duration".format(len(variables), len(region_args), len(sector_args), len(lambda_axis), len(duration_axis)) else: filename = pars['output'] pickle_data(data.data_capsule, 'data_cap', experiment_series_dir, filename) runtime = timeit.default_timer() - starting_time print("Runtime was {}".format(runtime))
{"hexsha": "14e21e6dcda5c8e2fb44e01184231e82f9ec5a1b", "size": 10258, "ext": "py", "lang": "Python", "max_stars_repo_path": "analysis/DataReader.py", "max_stars_repo_name": "rmiddelanis/harvey_scaling", "max_stars_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "analysis/DataReader.py", "max_issues_repo_name": "rmiddelanis/harvey_scaling", "max_issues_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "analysis/DataReader.py", "max_forks_repo_name": "rmiddelanis/harvey_scaling", "max_forks_repo_head_hexsha": "a94064996fb200c26a90482cc63804dcdc3cf6dd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 56.0546448087, "max_line_length": 120, "alphanum_fraction": 0.6175667771, "include": true, "reason": "import numpy", "num_tokens": 2137}
import abc import math from os import name, stat from typing import overload import tensorflow as tf from src.utils import sigma2snr, snr2sigma import tensorflow_probability as tfp tfd = tfp.distributions from tensor_annotations import tensorflow as ttf from tensor_annotations import axes import numpy as np from src.channelcoding.dataclasses import AWGNSettings, AdditiveTonAWGNSettings, ChannelSettings, NonIIDMarkovianGaussianAsAWGNSettings, UnknownChannelSettings from .codes import Code from .types import Time, Batch, Channels, A0, A1, A2, A3 def scale_constraint(data): return data * 2. - 1. def identity_constraint(data): return data class Channel(Code): @overload def log_likelihood(self, msg: ttf.Tensor3[Batch, Time, Channels], outputs: ttf.Tensor1[Channels]) -> ttf.Tensor2[Batch, Time]: ... @overload def log_likelihood(self, msg: ttf.Tensor3[Batch, Time, Channels], outputs: ttf.Tensor2[A0, Channels]) -> ttf.Tensor3[Batch, Time, A0]: ... @overload def log_likelihood(self, msg: ttf.Tensor3[Batch, Time, Channels], outputs: ttf.Tensor3[A0, A1, Channels]) -> ttf.Tensor4[Batch, Time, A0, A1]: ... @overload def log_likelihood(self, msg: ttf.Tensor3[Batch, Time, Channels], outputs: ttf.Tensor4[A0, A1, A2, Channels]) -> ttf.Tensor5[Batch, Time, A0, A1, A2]: ... @overload def log_likelihood(self, msg: ttf.Tensor3[Batch, Time, Channels], outputs: ttf.Tensor5[A0, A1, A2, A3, Channels]) -> ttf.Tensor6[Batch, Time, A0, A1, A2, A3]: ... @abc.abstractmethod def log_likelihood(self, msg, outputs): pass @abc.abstractmethod def logit_posterior(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]: pass @property def num_input_channels(self): return None @property def num_output_channels(self): return None def settings(self) -> ChannelSettings: return UnknownChannelSettings(self.name) class NoisyChannel(Channel): @abc.abstractstaticmethod def noise_func(self, shape, *args): pass class AWGN(NoisyChannel): def __init__(self, sigma, power_constraint=scale_constraint, name="AWGN"): super().__init__(name) self.sigma = sigma self.variance = self.sigma ** 2 self.power_constraint = power_constraint # self._noise_constant = -tf.math.log(self.sigma * tf.math.sqrt(2 * math.pi)) def noise_func(self, shape): return tf.random.normal(shape, stddev=self.sigma) def call(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]: # return (2. * msg - 1.) + self.noise_func(tf.shape(msg)) return self.power_constraint(msg) + self.noise_func(tf.shape(msg)) def log_likelihood(self, msg, outputs): msg_shape = tf.shape(msg) expanded_msg_shape = tf.concat([msg_shape[0:2], tf.ones((tf.rank(outputs) - 1,), dtype=tf.int32), msg_shape[2:3]], axis=0) msg = tf.reshape(msg, expanded_msg_shape) outputs = outputs[None, None] # Compute ln(Chi) values # chi_values[k, i, t] = log p(Y[k] | s[k] = i, s[k+1] = next_states[i, t]) # B x K x 1 x 1 x ... x n - 1 x 1 x A0 x A1 x ... x n, reduce on last axis, result is B x K x A0 x A1 x ... # square_noise_sum = tf.math.reduce_sum(tf.square(msg - (2. * outputs - 1.)), axis=-1) square_noise_sum = tf.math.reduce_sum(tf.square(msg - self.power_constraint(outputs)), axis=-1) # chi_values = -tf.math.log(noise_std * tf.math.sqrt(2 * math.pi)) - 1 / (2 * noise_variance) * square_noise_sum # the first log term will cancel out in calculation of LLRs so I can drop it # return self._noise_constant - 1. / (2 * self.variance) * square_noise_sum return - 1. 
/ (2 * self.variance) * square_noise_sum # @tf.function def logit_posterior(self, msg: ttf.Tensor3[Batch, Time, Channels]) -> ttf.Tensor3[Batch, Time, Channels]: return 2 * msg / self.variance def settings(self) -> ChannelSettings: return AWGNSettings.from_sigma(self.sigma, self.name) class AdditiveTonAWGN(AWGN): def __init__(self, sigma, v=3, power_constraint=scale_constraint, name: str = "AdditiveTonAWGN"): super().__init__(sigma, power_constraint=power_constraint, name=name) self.v = v self.distribution = tfp.distributions.StudentT(df=self.v, loc=0, scale=1) def noise_func(self, shape): return self.sigma * tf.sqrt((self.v - 2) / self.v) * self.distribution.sample(shape) def settings(self) -> ChannelSettings: return AdditiveTonAWGNSettings.from_sigma(self.sigma, name) # p_gg = 0.8 # stay in good state # p_bb = 0.8 # bsc_k = snr_db2sigma(snr_sigma2db(this_sigma) + 1) # accuracy on good state # bsc_h = snr_db2sigma(snr_sigma2db(this_sigma) - 1) # accuracy on good state # fwd_noise = np.zeros(noise_shape) # for batch_idx in range(noise_shape[0]): # for code_idx in range(noise_shape[2]): # good = True # for time_idx in range(noise_shape[1]): # if good: # if test_sigma == 'default': # fwd_noise[batch_idx,time_idx, code_idx] = bsc_k[batch_idx,time_idx, code_idx] # else: # fwd_noise[batch_idx,time_idx, code_idx] = bsc_k # good = np.random.random()<p_gg # elif not good: # if test_sigma == 'default': # fwd_noise[batch_idx,time_idx, code_idx] = bsc_h[batch_idx,time_idx, code_idx] # else: # fwd_noise[batch_idx,time_idx, code_idx] = bsc_h # good = np.random.random()<p_bb # else: # print('bad!!! something happens') # fwd_noise = torch.from_numpy(fwd_noise).type(torch.FloatTensor)* torch.randn(noise_shape, dtype=torch.float) class NonIIDMarkovianGaussianAsAWGN(AWGN): def __init__(self, sigma, block_len, p_gb=0.8, p_bg=0.8, power_constraint=scale_constraint, name="NonIIDMarkovianGaussianAsAWGN"): super().__init__(sigma, power_constraint=power_constraint, name=name) self.p_gb = p_gb self.p_bg = p_bg self.block_len = block_len self.snr = sigma2snr(self.sigma) self.good_snr = self.snr + 1 self.bad_snr = self.snr - 1 self.good_sigma = snr2sigma(self.good_snr) self.bad_sigma = snr2sigma(self.bad_snr) # Always start in good state; good = 1, bad = 0 self.initial_distribution = tfd.Categorical(probs=[0.0, 1.0]) self.transition_distribution = tfd.Categorical(probs=[[1 - p_bg, p_bg], [p_gb, 1 - p_gb]]) self.observation_distribution = tfd.Normal(loc=[0., 0.], scale=[self.bad_sigma, self.good_sigma]) self.distribution = tfd.HiddenMarkovModel(self.initial_distribution, self.transition_distribution, self.observation_distribution, num_steps=self.block_len) def noise_func(self, shape): # shape[1] corresponds to time. we sample Batch x Channels x Time then swap channels and time axes return tf.transpose(self.distribution.sample((shape[0], shape[2])), perm=[0, 2, 1]) def settings(self) -> NonIIDMarkovianGaussianAsAWGNSettings: return NonIIDMarkovianGaussianAsAWGNSettings( sigma=self.sigma, good_sigma=self.good_sigma, bad_sigma=self.bad_sigma, snr=self.snr, good_snr=self.good_snr, bad_snr=self.bad_snr, p_gb=self.p_gb, p_bg=self.p_bg, block_len=self.block_len, name=self.name )
{"hexsha": "905050b3abeeae342b8297ea323dbc182a46a123", "size": 7928, "ext": "py", "lang": "Python", "max_stars_repo_path": "turbo-codes/src/channelcoding/channels.py", "max_stars_repo_name": "tripods-xai/isit-2022", "max_stars_repo_head_hexsha": "024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-02-23T14:59:14.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-23T14:59:14.000Z", "max_issues_repo_path": "turbo-codes/src/channelcoding/channels.py", "max_issues_repo_name": "tripods-xai/isit-2022", "max_issues_repo_head_hexsha": "024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "turbo-codes/src/channelcoding/channels.py", "max_forks_repo_name": "tripods-xai/isit-2022", "max_forks_repo_head_hexsha": "024a0ccb59f7d4b2c9e88ef96d4a9c57712d6dfd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 45.0454545455, "max_line_length": 166, "alphanum_fraction": 0.632568113, "include": true, "reason": "import numpy", "num_tokens": 2076}
// Copyright (c) 2018-2019 The Unit-e developers // Distributed under the MIT software license, see the accompanying // file COPYING or http://www.opensource.org/licenses/mit-license.php. #include <proposer/block_builder.h> #include <consensus/tx_verify.h> #include <consensus/validation.h> #include <test/test_unite.h> #include <test/test_unite_mocks.h> #include <boost/test/unit_test.hpp> #include <algorithm> BOOST_AUTO_TEST_SUITE(tx_verify_tests) BOOST_AUTO_TEST_CASE(check_tx_inputs_no_haz_coins) { mocks::CoinsViewMock utxos; utxos.default_have_inputs = false; CTransaction tx; CValidationState validation_state; CAmount fees; const bool result = Consensus::CheckTxInputs(tx, validation_state, utxos, 2, fees); BOOST_CHECK(!result); BOOST_CHECK(!validation_state.IsValid()); BOOST_CHECK_EQUAL(validation_state.GetRejectCode(), REJECT_INVALID); BOOST_CHECK_EQUAL(validation_state.GetRejectReason(), "bad-txns-inputs-missingorspent"); } BOOST_AUTO_TEST_CASE(check_tx_inputs_no_reward) { const CTxIn meta_input; BOOST_REQUIRE(meta_input.prevout.IsNull()); const uint256 some_txid = uint256S("4623a9438473459c466ea4fe87b5a614362e08c47454cf59646e49c5759cb60d"); const CTxIn staking_input(some_txid, 0); CTransaction tx = [&] { CMutableTransaction tx; tx.SetType(TxType::COINBASE); tx.vin = {meta_input, staking_input}; return tx; }(); mocks::CoinsViewMock utxos; utxos.default_coin = Coin(CTxOut(21, CScript()), 1, TxType::REGULAR); CValidationState validation_state; CAmount fees; const bool result = Consensus::CheckTxInputs(tx, validation_state, utxos, 2, fees); BOOST_CHECK(!result); BOOST_CHECK(!validation_state.IsValid()); BOOST_CHECK_EQUAL(validation_state.GetRejectCode(), REJECT_INVALID); BOOST_CHECK_EQUAL(validation_state.GetRejectReason(), "bad-cb-no-reward"); } BOOST_AUTO_TEST_CASE(check_tx_inputs_does_not_access_coinbase_meta_input) { const CTxIn meta_input; BOOST_REQUIRE(meta_input.prevout.IsNull()); const uint256 some_txid = uint256S("4623a9438473459c466ea4fe87b5a614362e08c47454cf59646e49c5759cb60d"); const CTxIn staking_input(some_txid, 0); CTransaction tx = [&] { CMutableTransaction tx; tx.SetType(TxType::COINBASE); tx.vin = {meta_input, staking_input}; tx.vout = {CTxOut(21, CScript())}; return tx; }(); std::vector<COutPoint> coins_accessed; mocks::CoinsViewMock utxos; utxos.default_coin = Coin(CTxOut(21, CScript()), 1, TxType::REGULAR); utxos.access_coin = [&](const COutPoint &coin) -> const Coin & { coins_accessed.emplace_back(coin); return utxos.default_coin; }; CValidationState validation_state; CAmount fees; Consensus::CheckTxInputs(tx, validation_state, utxos, 2, fees); // check vin[0] was not tried to be retrieved from coins view BOOST_CHECK(std::find(coins_accessed.begin(), coins_accessed.end(), meta_input.prevout) == coins_accessed.end()); // check vin[1] was tried to be retrieved from coins view BOOST_CHECK(std::find(coins_accessed.begin(), coins_accessed.end(), staking_input.prevout) != coins_accessed.end()); } BOOST_AUTO_TEST_CASE(check_tx_inputs_rejects_coinbase_that_spends_too_little) { const CTxIn meta_input; BOOST_REQUIRE(meta_input.prevout.IsNull()); const uint256 some_txid = uint256S("4623a9438473459c466ea4fe87b5a614362e08c47454cf59646e49c5759cb60d"); const CTxIn staking_input(some_txid, 0); const CAmount reward = 21; const CAmount stake_in = 19; const CAmount stake_out = stake_in - 1; CTransaction tx = [&] { CMutableTransaction tx; tx.SetType(TxType::COINBASE); tx.vin = {meta_input, staking_input}; tx.vout = {CTxOut(reward, CScript()), CTxOut(stake_out, CScript())}; return tx; 
}(); mocks::CoinsViewMock utxos; utxos.default_coin = Coin(CTxOut(stake_in, CScript()), 1, TxType::REGULAR); CValidationState validation_state; CAmount fees; const bool result = Consensus::CheckTxInputs(tx, validation_state, utxos, 2, fees); BOOST_CHECK(!result); BOOST_CHECK(!validation_state.IsValid()); BOOST_CHECK_EQUAL(validation_state.GetRejectCode(), REJECT_INVALID); BOOST_CHECK_EQUAL(validation_state.GetRejectReason(), "bad-cb-spends-too-little"); } BOOST_AUTO_TEST_CASE(check_tx_inputs_rejects_coinbase_that_spends_too_much) { const CTxIn meta_input; BOOST_REQUIRE(meta_input.prevout.IsNull()); const uint256 some_txid = uint256S("4623a9438473459c466ea4fe87b5a614362e08c47454cf59646e49c5759cb60d"); const CTxIn staking_input(some_txid, 0); const CAmount reward = 21; const CAmount stake_in = 19; const CAmount stake_out = stake_in + 1; CTransaction tx = [&] { CMutableTransaction tx; tx.SetType(TxType::COINBASE); tx.vin = {meta_input, staking_input}; tx.vout = {CTxOut(reward, CScript()), CTxOut(stake_out, CScript())}; return tx; }(); mocks::CoinsViewMock utxos; utxos.default_coin = Coin(CTxOut(stake_in, CScript()), 1, TxType::REGULAR); CValidationState validation_state; CAmount fees; const bool result = Consensus::CheckTxInputs(tx, validation_state, utxos, 2, fees); BOOST_CHECK(!result); BOOST_CHECK(!validation_state.IsValid()); BOOST_CHECK_EQUAL(validation_state.GetRejectCode(), REJECT_INVALID); BOOST_CHECK_EQUAL(validation_state.GetRejectReason(), "bad-cb-spends-too-much"); } BOOST_AUTO_TEST_SUITE_END()
{"hexsha": "a907583a9260116c0759e8ca52ffde9c1d304554", "size": 5358, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "src/test/tx_verify_tests.cpp", "max_stars_repo_name": "frolosofsky/unit-e", "max_stars_repo_head_hexsha": "d3d12508b915986841bd19c4dee9e50dd662a112", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/test/tx_verify_tests.cpp", "max_issues_repo_name": "frolosofsky/unit-e", "max_issues_repo_head_hexsha": "d3d12508b915986841bd19c4dee9e50dd662a112", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/test/tx_verify_tests.cpp", "max_forks_repo_name": "frolosofsky/unit-e", "max_forks_repo_head_hexsha": "d3d12508b915986841bd19c4dee9e50dd662a112", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.6981132075, "max_line_length": 118, "alphanum_fraction": 0.758118701, "num_tokens": 1523}
(* Copyright (C) 2020 Susi Lehtola This Source Code Form is subject to the terms of the Mozilla Public License, v. 2.0. If a copy of the MPL was not distributed with this file, You can obtain one at http://mozilla.org/MPL/2.0/. *) (* type: mgga_exc *) (* prefix: mgga_x_jk_params *params; assert(p->params != NULL); params = (mgga_x_jk_params * ) (p->params); *) $include "gga_x_b88.mpl" (* equation 11 *) y := (x,u) -> x^2 - u: (* equation 5 *) gBecke := x -> b88_f(x)-1: (* equation 24 *) jk_f := (x,u,t) -> 1 + gBecke(x)/(1 + 2*y(x,u)/x^2): f := (rs, z, xt, xs0, xs1, u0, u1, t0, t1) -> mgga_exchange(jk_f, rs, z, xs0, xs1, u0, u1, t0, t1):
{"hexsha": "6e8101cb218a017d25bbaf8c7fa1e097e5aa3d6e", "size": 662, "ext": "mpl", "lang": "Maple", "max_stars_repo_path": "libxc-5.1.6/maple/mgga_exc/mgga_x_jk.mpl", "max_stars_repo_name": "pwang234/lsms", "max_stars_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2018-04-03T15:35:47.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-01T03:19:23.000Z", "max_issues_repo_path": "libxc-5.1.6/maple/mgga_exc/mgga_x_jk.mpl", "max_issues_repo_name": "pwang234/lsms", "max_issues_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-07-30T13:59:18.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-31T17:43:35.000Z", "max_forks_repo_path": "libxc-5.1.6/maple/mgga_exc/mgga_x_jk.mpl", "max_forks_repo_name": "pwang234/lsms", "max_forks_repo_head_hexsha": "6044153b6138512093e457bdc0c15c699c831778", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2018-06-30T00:30:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:14:29.000Z", "avg_line_length": 23.6428571429, "max_line_length": 68, "alphanum_fraction": 0.6042296073, "num_tokens": 253}
""" Created: May 2018 @author: JerryX Find more : https://www.zhihu.com/people/xu-jerry-82 """ import numpy as np import sys import os curPath = os.path.abspath(os.path.dirname(__file__)) sys.path.append(curPath) from xDLUtils import Tools #Tools = xnnUtils.Tools() # 全连接类 class FCLayer(object): def __init__(self, miniBatchesSize, i_size, o_size, activator, optimizerCls,optmParams, needReshape,dataType,init_w): # 初始化超参数 self.miniBatchesSize = miniBatchesSize self.i_size = i_size self.o_size = o_size self.activator = activator self.optimizerObj = optimizerCls(optmParams, dataType) # 是否将N,T,D输入,先拉伸成N,T*D,再做仿射变换 # 在letNet-5中,可以将pooling层输出3维拉成2维 # 在RNN中,可以将N v M中的N个T时刻输出D维向量, # 变成N,T*D ,再仿射变换 为 N*D' 规格, 起到 N->M映射的效果 self.needReshape = needReshape self.dataType = dataType self.w = init_w * np.random.randn(i_size, o_size).astype(dataType) self.b = np.zeros(o_size, dataType) self.out = [] self.deltaPrev = [] # 上一层激活后的误差输出 self.deltaOri = [] # 本层原始误差输出 self.shapeOfOriIn = () # 原始输入维度 self.inputReshaped =[] # 预测时前向传播 def inference(self, input): self.shapeOfOriIn = input.shape self.out = self.fp(input) return self.out # 前向传播,激活后再输出 def fp(self, input): # 拉伸变形处理 self.shapeOfOriIn = input.shape self.inputReshaped = input if self.needReshape is False else input.reshape(input.shape[0],-1) self.out = self.activator.activate(Tools.matmul(self.inputReshaped, self.w) + self.b) ####debug#### # np.savetxt('G:/0tmp/0debug/x.csv',self.inputReshaped[0]) # np.savetxt('G:/0tmp/0debug/w_c1.csv', self.w[:,0]) # np.savetxt('G:/0tmp/0debug/w_c2.csv', self.w[:, 1]) # np.savetxt('G:/0tmp/0debug/out.csv', self.out[0]) ####debug end##### return self.out # 反向传播方法(误差和权参) def bp(self, input, delta, lrt): self.deltaOri = self.activator.bp(delta, self.out) # # 恢复拉伸变形 # self.deltaOri = deltaOri_reshaped if self.needReshape is False else deltaOri_reshaped.reshape(self.shapeOfOriIn) self.bpDelta() self.bpWeights(input, lrt) return self.deltaPrev # 输出误差反向传播至上一层 def bpDelta(self): deltaPrevReshapped = Tools.matmul(self.deltaOri, self.w.T) self.deltaPrev = deltaPrevReshapped if self.needReshape is False else deltaPrevReshapped.reshape(self.shapeOfOriIn) return self.deltaPrev # 计算反向传播权重梯度w,b def bpWeights(self, input, lrt): # dw = Tools.matmul(input.T, self.deltaOri) dw = Tools.matmul(self.inputReshaped.T, self.deltaOri) db = np.sum(self.deltaOri, axis=0, keepdims=True).reshape(self.b.shape) weight = (self.w,self.b) dweight = (dw,db) # 元组按引用传递,值在方法内部已被更新 self.optimizerObj.getUpdWeights(weight,dweight, lrt)
{"hexsha": "8fc2034f0644b29f89328813362022468e3df07b", "size": 3078, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/xDLbase/fc.py", "max_stars_repo_name": "AskyJx/xDeepLearning", "max_stars_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 20, "max_stars_repo_stars_event_min_datetime": "2019-03-21T18:16:00.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-21T12:10:04.000Z", "max_issues_repo_path": "src/xDLbase/fc.py", "max_issues_repo_name": "AskyJx/xDeepLearning", "max_issues_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2019-07-17T10:09:23.000Z", "max_issues_repo_issues_event_max_datetime": "2019-08-10T11:38:29.000Z", "max_forks_repo_path": "src/xDLbase/fc.py", "max_forks_repo_name": "AskyJx/xDeepLearning", "max_forks_repo_head_hexsha": "98d875a34c6b4f4a51dcb825998028fd36260848", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2019-06-03T15:26:55.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-09T03:06:01.000Z", "avg_line_length": 34.5842696629, "max_line_length": 124, "alphanum_fraction": 0.6130604288, "include": true, "reason": "import numpy", "num_tokens": 1001}
import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.cm as cm from skimage.transform import pyramid_gaussian from PIL import Image import numpy as np from struct import unpack import os import sys import re import gc import fs_utility from logger import Logger log = Logger(__name__) log.logger.propagate = False def fill_ico_subimage(depth_data_list_, subimage_idx_list): """ replace missed subimage with zero matrix. """ depth_data_list = [np.zeros_like(depth_data_list_[0])] * 20 for subimage_index in range(len(subimage_idx_list)): subimage_face_idx = subimage_idx_list[subimage_index] depth_data_list[subimage_face_idx] = depth_data_list_[subimage_index] return depth_data_list def depth_ico_visual_save(depth_data_list_, output_path, subimage_idx_list=None): """save the visualized depth map array to image file with value-bar. :param dapthe_data: The depth data. :type dapthe_data: numpy :param output_path: the absolute path of output image. :type output_path: str :param subimage_idx_list: available subimages index list. :type subimage_idx_list: list """ # get vmin and vmax # for dispmap in depth_data_list: # if vmin_ > np.amin(dispmap): # vmin_ = np.amin(dispmap) # if vmax_ < np.amax(dispmap): # vmax_ = np.amax(dispmap) vmin_ = 0 vmax_ = 0 dispmap_array = np.concatenate(depth_data_list_).flatten() vmin_idx = int(dispmap_array.size * 0.05) vmax_idx = int(dispmap_array.size * 0.95) vmin_ = np.partition(dispmap_array, vmin_idx)[vmin_idx] vmax_ = np.partition(dispmap_array, vmax_idx)[vmax_idx] # add blank image to miss subimage to fill the sub-image array depth_data_list = None if len(depth_data_list_) != 20 \ and subimage_idx_list is not None \ and len(depth_data_list_) == len(subimage_idx_list): log.debug("The ico's sub-image size is {}, fill blank sub-images.".format(len(depth_data_list_))) # depth_data_list = [np.zeros_like(depth_data_list_[0])] * 20 # for subimage_index in range(len(subimage_idx_list)): # subimage_face_idx = subimage_idx_list[subimage_index] # depth_data_list[subimage_face_idx] = depth_data_list_[subimage_index] depth_data_list = fill_ico_subimage(depth_data_list_, subimage_idx_list) elif len(depth_data_list_) == 20: depth_data_list = depth_data_list_ else: raise log.error("The sub-image is not completed.") # draw image figure, axes = plt.subplots(4, 5) counter = 0 for row_index in range(0, 4): for col_index in range(0, 5): axes[row_index, col_index].get_xaxis().set_visible(False) axes[row_index, col_index].get_yaxis().set_visible(False) # add sub caption axes[row_index, col_index].set_title(str(counter)) counter = counter + 1 # dispmap_index = row_index * 5 + col_index im = axes[row_index, col_index].imshow(depth_data_list[dispmap_index], cmap=cm.jet, vmin=vmin_, vmax=vmax_) figure.tight_layout() plt.colorbar(im, ax=axes.ravel().tolist()) plt.savefig(output_path, dpi=150) # plt.show() plt.close(figure) def depth_visual_save(depth_data, output_path, overwrite=True): """save the visualized depth map to image file with value-bar. :param dapthe_data: The depth data. :type dapthe_data: numpy :param output_path: the absolute path of output image. 
:type output_path: str """ depth_data_temp = depth_data.astype(np.float64) if fs_utility.exist(output_path, 1) and overwrite: log.warn("{} exist.".format(output_path)) fs_utility.file_rm(output_path) # draw image fig = plt.figure() plt.subplots_adjust(left=0, bottom=0, right=0.1, top=0.1, wspace=None, hspace=None) ax = fig.add_subplot(111) ax.get_xaxis().set_visible(False) ax.get_yaxis().set_visible(False) fig.tight_layout() im = ax.imshow(depth_data_temp, cmap="turbo") cbar = ax.figure.colorbar(im, ax=ax) plt.savefig(output_path, dpi=150) # plt.imsave(output_path, dapthe_data_temp, cmap="turbo") plt.close(fig) def depth_visual(depth_data): """ visualize the depth map """ min = np.min(depth_data) max = np.max(depth_data) norm = mpl.colors.Normalize(vmin=min, vmax=max) cmap = plt.get_cmap('jet') m = cm.ScalarMappable(norm=norm, cmap=cmap) return (m.to_rgba(depth_data)[:, :, :3] * 255).astype(np.uint8) def rgb2dispmap(image_filepath, pytorch_hub=True): """ Estimate dispmap from rgb image. :param image_filepath: the rgb image filepath :type image_filepath: str :param pytorch_hub: which module should use, defaults to True :type pytorch_hub: bool, optional :return: MiDaS estimated dispmap :rtype: numpy """ depthmap_data = None if pytorch_hub: log.debug("use PyTorch Hub MiDaS.") depthmap_data = MiDaS_torch_hub_file(image_filepath) else: log.debug("use local MiDaS.") # add local MiDas to python path dir_scripts = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(os.path.join(dir_scripts, "../../MiDaS/")) from MiDaS import MiDaS_utils from MiDaS.monodepth_net import MonoDepthNet from MiDaS.run import run_depth image_data = np.asarray(Image.open(image_filepath)[..., :3]) image_data = image_data[np.newaxis, :, :, [2, 0, 1]] MiDaS_module_filepath = dir_scripts + '../../MiDas/model.pt' if os.path.exists(MiDaS_module_filepath): log.error("MiDaS local module {} does not exist.".format(MiDaS_module_filepath)) depthmap_data = run_depth(image_data, MiDaS_module_filepath, MonoDepthNet, MiDaS_utils)[0] return depthmap_data def MiDaS_torch_hub_data(rgb_image_data_list, persp_monodepth, use_large_model=True): """Estimation the single RGB image's depth with MiDaS downloading from Torch Hub. reference: https://pytorch.org/hub/intelisl_midas_v2/ :param rgb_image_path: the RGB image file path. :type rgb_image_path: str :param use_large_model: the MiDaS model type. 
:type use_large_model: bool, optional """ import torch # 1)initial PyTorch run-time environment if use_large_model: if persp_monodepth == "midas2": midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") if persp_monodepth == "midas3": midas = torch.hub.load("intel-isl/MiDaS", "DPT_Large") else: if persp_monodepth == "midas2": midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small") if persp_monodepth == "midas3": midas = torch.hub.load("intel-isl/MiDaS", "DPT_Hybrid") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") midas.to(device) midas.eval() midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") if use_large_model: transform = midas_transforms.default_transform else: transform = midas_transforms.small_transform disparity_map_list = [] for index in range(0, len(rgb_image_data_list)): img = rgb_image_data_list[index] input_batch = transform(img).to(device) with torch.no_grad(): prediction = midas(input_batch) prediction = torch.nn.functional.interpolate( prediction.unsqueeze(1), size=img.shape[:2], mode="bicubic", align_corners=False, ).squeeze() output = prediction.cpu().numpy() disparity_map_list.append(output) del output del input_batch del prediction torch.cuda.empty_cache() if index % 10 ==0: log.debug("MiDaS estimate {} rgb image's disparity map.".format(index)) del midas gc.collect() torch.cuda.empty_cache() return disparity_map_list def MiDaS_torch_hub_file(rgb_image_path, use_large_model=True): """Estimation the single RGB image's depth with MiDaS downloading from Torch Hub. reference: https://pytorch.org/hub/intelisl_midas_v2/ :param rgb_image_path: the RGB image file path. :type rgb_image_path: str :param use_large_model: the MiDaS model type. :type use_large_model: bool, optional """ import cv2 import torch # import urllib.request # import matplotlib.pyplot as plt # url, filename = ("https://github.com/pytorch/hub/raw/master/images/dog.jpg", "dog.jpg") # urllib.request.urlretrieve(url, filename) # use_large_model = True if use_large_model: midas = torch.hub.load("intel-isl/MiDaS", "MiDaS") else: midas = torch.hub.load("intel-isl/MiDaS", "MiDaS_small") device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu") midas.to(device) midas.eval() midas_transforms = torch.hub.load("intel-isl/MiDaS", "transforms") if use_large_model: transform = midas_transforms.default_transform else: transform = midas_transforms.small_transform img = cv2.imread(rgb_image_path) img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) input_batch = transform(img).to(device) with torch.no_grad(): prediction = midas(input_batch) prediction = torch.nn.functional.interpolate( prediction.unsqueeze(1), size=img.shape[:2], mode="bicubic", align_corners=False, ).squeeze() output = prediction.cpu().numpy() # plt.imshow(output) # plt.show() return output def read_dpt(dpt_file_path): """read depth map from *.dpt file. 
:param dpt_file_path: the dpt file path :type dpt_file_path: str :return: depth map data :rtype: numpy """ TAG_FLOAT = 202021.25 # check for this when READING the file ext = os.path.splitext(dpt_file_path)[1] assert len(ext) > 0, ('readFlowFile: extension required in fname %s' % dpt_file_path) assert ext == '.dpt', exit('readFlowFile: fname %s should have extension ''.flo''' % dpt_file_path) fid = None try: fid = open(dpt_file_path, 'rb') except IOError: print('readFlowFile: could not open %s', dpt_file_path) tag = unpack('f', fid.read(4))[0] width = unpack('i', fid.read(4))[0] height = unpack('i', fid.read(4))[0] assert tag == TAG_FLOAT, ('readFlowFile(%s): wrong tag (possibly due to big-endian machine?)' % dpt_file_path) assert 0 < width and width < 100000, ('readFlowFile(%s): illegal width %d' % (dpt_file_path, width)) assert 0 < height and height < 100000, ('readFlowFile(%s): illegal height %d' % (dpt_file_path, height)) # arrange into matrix form depth_data = np.fromfile(fid, np.float32) depth_data = depth_data.reshape(height, width) fid.close() return depth_data def read_exr(exp_file_path): """Read depth map from EXR file :param exp_file_path: file path :type exp_file_path: str :return: depth map data :rtype: numpy """ import array import OpenEXR import Imath # Open the input file file = OpenEXR.InputFile(exp_file_path) # Compute the size dw = file.header()['dataWindow'] sz = (dw.max.x - dw.min.x + 1, dw.max.y - dw.min.y + 1) # Read the three color channels as 32-bit floats FLOAT = Imath.PixelType(Imath.PixelType.FLOAT) (R, G, B) = [array.array('f', file.channel(Chan, FLOAT)).tolist() for Chan in ("R", "G", "B")] # R,G,B channel is same R_np = np.array(R).reshape((sz[1], sz[0])) return R_np def read_pfm(path): """Read pfm file. :param path: the PFM file's path. :type path: str :return: the depth map array and scaler of depth :rtype: tuple: (data, scale) """ with open(path, "rb") as file: color = None width = None height = None scale = None endian = None header = file.readline().rstrip() if header.decode("ascii") == "PF": color = True elif header.decode("ascii") == "Pf": color = False else: log.error("Not a PFM file: " + path) dim_match = re.match(r"^(\d+)\s(\d+)\s$", file.readline().decode("ascii")) if dim_match: width, height = list(map(int, dim_match.groups())) else: log.error("Malformed PFM header.") scale = float(file.readline().decode("ascii").rstrip()) if scale < 0: # little-endian endian = "<" scale = -scale else: # big-endian endian = ">" data = np.fromfile(file, endian + "f") shape = (height, width, 3) if color else (height, width) data = np.reshape(data, shape) data = np.flipud(data) return data, scale def write_pfm(path, image, scale=1): """Write depth data to pfm file. 
:param path: pfm file path :type path: str :param image: depth data :type image: numpy :param scale: Scale, defaults to 1 :type scale: int, optional """ if image.dtype.name != "float32": #raise Exception("Image dtype must be float32.") log.warn("The depth map data is {}, convert to float32 and save to pfm format.".format(image.dtype.name)) image_ = image.astype(np.float32) else : image_ = image image_ = np.flipud(image_) color = None if len(image_.shape) == 3 and image_.shape[2] == 3: # color image color = True elif len(image_.shape) == 2 or len(image_.shape) == 3 and image_.shape[2] == 1: # greyscale color = False else: log.error("Image must have H x W x 3, H x W x 1 or H x W dimensions.") # raise Exception("Image must have H x W x 3, H x W x 1 or H x W dimensions.") with open(path, "wb") as file: file.write("PF\n" if color else "Pf\n".encode()) file.write("%d %d\n".encode() % (image_.shape[1], image_.shape[0])) endian = image_.dtype.byteorder if endian == "<" or endian == "=" and sys.byteorder == "little": scale = -scale file.write("%f\n".encode() % scale) image_.tofile(file) def depthmap_histogram(data): """ Visualize the pixel value distribution. """ data_max = int(np.max(data)) data_min = int(np.min(data) + 0.5) big_number = 40 #int((data_max - data_min + 1) / 10000) print("depth map max is {}, min is {}, bin nubmer is {}".format(np.max(data), np.min(data), big_number)) bin_range = np.linspace(data_min, data_max+1, big_number) histogram, bin_edges = np.histogram(data, bin_range, range=(np.min(data), np.max(data))) # configure and draw the histogram figure plt.figure() plt.title("Depth map data distribution.") plt.xlabel("depth value") plt.ylabel("pixels") plt.plot(bin_edges[0:-1], histogram) # <- or here plt.show() def depth2disparity(depth_map, baseline=1.0, focal=1.0): """ Convert the depth map to disparity map. :param depth_map: depth map data :type depth_map: numpy :param baseline: [description], defaults to 1 :type baseline: float, optional :param focal: [description], defaults to 1 :type focal: float, optional :return: disparity map data, :rtype: numpy """ no_zeros_index = np.where(depth_map != 0) disparity_map = np.full(depth_map.shape, np.Inf, np.float64) disparity_map[no_zeros_index] = (baseline * focal) / depth_map[no_zeros_index] return disparity_map def disparity2depth(disparity_map, baseline=1.0, focal=1.0): """Convert disparity value to depth value. """ no_zeros_index = np.where(disparity_map != 0) depth_map = np.full(disparity_map.shape, np.Inf, np.float64) depth_map[no_zeros_index] = (baseline * focal) / disparity_map[no_zeros_index] return depth_map def dispmap_normalize(dispmap, method = "", mask = None): """Normalize a disparity map. TODO support mask :param dispmap: the original disparity map. :type dispmap: numpy :param method: the normalization method's name. :type method: str :param mask: The mask map, available pixel is 1, invalid is 0. :type mask: numpy :return: normalized disparity map. 
:rtype: numpy """ if mask is None: mask = np.ones_like(dispmap, dtype= np.bool) dispmap_norm = None if method == "naive": dispmap_mean = np.mean(dispmap) dispmap_norm = dispmap / dispmap_mean elif method == "midas": median_dispmap = np.median(dispmap[mask]) dev_dispmap = np.sum(np.abs(dispmap[mask] - median_dispmap)) / np.sum(mask) dispmap_norm = np.full(dispmap.shape, np.nan, dtype= np.float64) dispmap_norm[mask] = (dispmap[mask] - median_dispmap) / dev_dispmap elif method == "range01": max_index = np.argsort(dispmap, axis=None)[int(dispmap.size * 0.96)] min_index = np.argsort(dispmap, axis=None)[int(dispmap.size * 0.04)] max = dispmap.flatten()[max_index] min = dispmap.flatten()[min_index] dispmap_norm = (dispmap - min) / (max-min) else: log.error("Normalize methoder {} do not supprot".format(method)) return dispmap_norm def subdepthmap_erp2tang(subimage_depthmap_erp, gnomonic_coord_xy): """ Covert the subimage's depth map from erp to tangent space. :param subimage_depthmap: the subimage's depth map in perspective projection, [height, width]. :param gnomonic_coord: The tangent image each pixels location in gnomonic space, [height, width] * 2. """ gnomonic_coord_x = gnomonic_coord_xy[0] gnomonic_coord_y = gnomonic_coord_xy[1] # convert the ERP depth map value to tangent image coordinate depth value center2pixel_length = np.sqrt(np.square(gnomonic_coord_x) + np.square(gnomonic_coord_y) + np.ones_like(gnomonic_coord_y)) subimage_depthmap_persp = np.divide(subimage_depthmap_erp, center2pixel_length) return subimage_depthmap_persp def subdepthmap_tang2erp(subimage_depthmap_persp, gnomonic_coord_xy): """ Convert the depth map from perspective to ERP space. :param subimage_erp_depthmap: subimage's depth map of ERP space. :type subimage_erp_depthmap: numpy :param gnomonic_coord_xy: The tangent image's pixels gnomonic coordinate, x and y. :type gnomonic_coord_xy: list """ gnomonic_coord_x = gnomonic_coord_xy[0] gnomonic_coord_y = gnomonic_coord_xy[1] center2pixel_length = np.sqrt(np.square(gnomonic_coord_x) + np.square(gnomonic_coord_y) + np.ones_like(gnomonic_coord_y)) subimage_depthmap_erp = subimage_depthmap_persp * center2pixel_length return subimage_depthmap_erp def depthmap_pyramid(depthmap_list, pyramid_layer_number, pyramid_downscale): """ Create the all depth maps pyramid. :param depthmap_list: The list of depth map :type depthmap_list: list :param pyramid_layer_number: the pyramid level number :type pyramid_layer_number: int :param pyramid_downscale: pyramid downsample ration, coarse_level_size = fine_level_size * pyramid_downscale :type pyramid_downscale: float :return: the pyramid for each depth map. the 1st index is pyramid level, 2nd is image index, [pyramid_idx][image_idx], 1st (index 0) level is coarsest image. :rtype: list """ depthmap_number = len(depthmap_list) depthmap_pryamid = [[0] * depthmap_number for i in range(pyramid_layer_number)] for index in range(0, depthmap_number): if pyramid_layer_number == 1: depthmap_pryamid[0][index] = depthmap_list[index].astype(np.float64) else: depthmap = depthmap_list[index] pyramid = tuple(pyramid_gaussian(depthmap, max_layer=pyramid_layer_number - 1, downscale=pyramid_downscale, multichannel=False)) for layer_index in range(0, pyramid_layer_number): depthmap_pryamid[pyramid_layer_number - layer_index - 1][index] = pyramid[layer_index].astype(np.float64) return depthmap_pryamid
{"hexsha": "e5402fbbcf07f84dbe9244ea5791bd7da31d21b3", "size": 20257, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/python/src/utility/depthmap_utils.py", "max_stars_repo_name": "manurare/360monodepth", "max_stars_repo_head_hexsha": "43347063f1fa521ea3e0866ff85bbcb734b64393", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "code/python/src/utility/depthmap_utils.py", "max_issues_repo_name": "manurare/360monodepth", "max_issues_repo_head_hexsha": "43347063f1fa521ea3e0866ff85bbcb734b64393", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "code/python/src/utility/depthmap_utils.py", "max_forks_repo_name": "manurare/360monodepth", "max_forks_repo_head_hexsha": "43347063f1fa521ea3e0866ff85bbcb734b64393", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.925862069, "max_line_length": 161, "alphanum_fraction": 0.6580934985, "include": true, "reason": "import numpy", "num_tokens": 5333}
from concurrent import futures import nevergrad.optimization as optimization from nevergrad import instrumentation as instru import argparse import torch from train_loop import TrainLoop import torch.optim as optim import torch.utils.data import model as model_ import numpy as np from data_load import Loader, Loader_valid import os import sys from utils.utils import * def get_file_name(dir_): idx = np.random.randint(1) fname = dir_ + '/' + str(np.random.randint(1,999999999,1)[0]) + '.pt' while os.path.isfile(fname): fname = dir_ + '/' + str(np.random.randint(1,999999999,1)[0]) + '.pt' file_ = open(fname, 'wb') pickle.dump(None, file_) file_.close() return fname # Training settings parser=argparse.ArgumentParser(description='HP random search for ASV') parser.add_argument('--batch-size', type=int, default=24, metavar='N', help='input batch size for training (default: 24)') parser.add_argument('--valid-batch-size', type=int, default=64, metavar='N', help='input batch size for valid (default: 64)') parser.add_argument('--epochs', type=int, default=200, metavar='N', help='number of epochs to train (default: 200)') parser.add_argument('--budget', type=int, default=30, metavar='N', help='Maximum training runs') parser.add_argument('--no-cuda', action='store_true', default=False, help='Disables GPU use') parser.add_argument('--model', choices=['resnet_mfcc', 'resnet_34', 'resnet_lstm', 'resnet_qrnn', 'resnet_stats', 'resnet_large', 'resnet_small', 'resnet_2d', 'TDNN', 'TDNN_att', 'TDNN_multihead', 'TDNN_lstm', 'TDNN_aspp', 'TDNN_mod', 'TDNN_multipool', 'transformer', 'all'], default='resnet_mfcc', help='Model arch according to input type') parser.add_argument('--workers', type=int, help='number of data loading workers', default=4) parser.add_argument('--hp-workers', type=int, help='number of search workers', default=1) parser.add_argument('--seed', type=int, default=1, metavar='S', help='random seed (default: 1)') parser.add_argument('--save-every', type=int, default=1, metavar='N', help='how many epochs to wait before logging training status. 
Default is 1')
parser.add_argument('--ncoef', type=int, default=23, metavar='N', help='number of MFCCs (default: 23)')
parser.add_argument('--data-info-path', type=str, default='./data/', metavar='Path', help='Path to folder containing spk2utt and utt2spk files')
parser.add_argument('--checkpoint-path', type=str, default=None, metavar='Path', help='Path for checkpointing')
parser.add_argument('--logdir', type=str, default=None, metavar='Path', help='Path for checkpointing')
args=parser.parse_args()
args.cuda=True if not args.no_cuda and torch.cuda.is_available() else False

def train(lr, l2, max_gnorm, momentum, margin, lambda_, swap, latent_size, n_frames, model, ncoef, epochs, batch_size, valid_batch_size, n_workers, cuda, train_hdf_file, valid_hdf_file, cp_path, softmax, delta, logdir):

    if cuda:
        device=get_freer_gpu()
        if args.model == 'resnet_qrnn':
            import cupy
            cupy.cuda.Device(int(str(device).split(':')[-1])).use()

    cp_name = get_file_name(cp_path)

    if args.logdir:
        from torch.utils.tensorboard import SummaryWriter
        writer = SummaryWriter(log_dir=logdir+cp_name, comment=args.model, purge_step=True)
    else:
        writer = None

    train_dataset = Loader(hdf5_name = train_hdf_file, max_nb_frames = int(n_frames), delta = delta)
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, num_workers=n_workers, worker_init_fn=set_np_randomseed)

    valid_dataset = Loader_valid(hdf5_name = valid_hdf_file, max_nb_frames = int(n_frames), delta = delta)
    valid_loader = torch.utils.data.DataLoader(valid_dataset, batch_size=valid_batch_size, shuffle=True, num_workers=n_workers, worker_init_fn=set_np_randomseed)

    if model == 'resnet_mfcc':
        model=model_.ResNet_mfcc(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
    elif model == 'resnet_34':
        model=model_.ResNet_34(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
    elif model == 'resnet_lstm':
        model=model_.ResNet_lstm(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
    elif model == 'resnet_qrnn':
        model=model_.ResNet_qrnn(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
    elif model == 'resnet_stats':
        model=model_.ResNet_stats(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'resnet_large':
        model = model_.ResNet_large(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'resnet_small':
        model = model_.ResNet_small(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'resnet_2d':
        model = model_.ResNet_2d(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'TDNN':
        model = model_.TDNN(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'TDNN_att':
        model = model_.TDNN_att(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'TDNN_multihead':
        model = model_.TDNN_multihead(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta)
    elif args.model == 'TDNN_lstm':
        model = model_.TDNN_lstm(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax,
delta=delta) elif args.model == 'TDNN_aspp': model = model_.TDNN_aspp(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta) elif args.model == 'TDNN_mod': model = model_.TDNN_mod(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta) elif args.model == 'TDNN_multipool': model = model_.TDNN_multipool(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta) elif args.model == 'transformer': model = model_.transformer_enc(n_z=int(latent_size), proj_size=train_dataset.n_speakers, ncoef=args.ncoef, sm_type=softmax, delta=delta) if cuda: model=model.to(device) else: device=None optimizer=optim.SGD(model.parameters(), lr=lr, momentum=momentum, weight_decay=l2) trainer=TrainLoop(model, optimizer, train_loader, valid_loader, max_gnorm=max_gnorm, margin=margin, lambda_=lambda_, verbose=-1, device=device, cp_name=cp_name, save_cp=True, checkpoint_path=cp_path, swap=swap, softmax=True, pretrain=False, mining=True, cuda=cuda, logger=writer) return trainer.train(n_epochs=epochs) lr=instru.var.OrderedDiscrete([0.1, 0.01, 0.001, 0.0001, 0.00001]) l2=instru.var.OrderedDiscrete([0.001, 0.0005, 0.0001, 0.00005, 0.00001]) max_gnorm=instru.var.OrderedDiscrete([10.0, 100.0, 1000.0]) momentum=instru.var.OrderedDiscrete([0.1, 0.3, 0.5, 0.7, 0.9]) margin=instru.var.OrderedDiscrete([0.1, 0.01, 0.001, 0.0001, 0.00001]) lambda_=instru.var.OrderedDiscrete([0.1, 0.15, 0.20, 0.25, 0.30, 0.4, 0.50]) swap=instru.var.OrderedDiscrete([True, False]) latent_size=instru.var.OrderedDiscrete([64, 128, 256, 512]) n_frames=instru.var.OrderedDiscrete([300, 400, 500, 600, 800]) model=instru.var.OrderedDiscrete(['resnet_mfcc', 'resnet_34', 'resnet_lstm', 'resnet_qrnn', 'resnet_stats', 'resnet_large', 'resnet_small', 'TDNN', 'TDNN_att', 'TDNN_multihead', 'TDNN_lstm', 'TDNN_aspp', 'TDNN_mod', 'TDNN_multipool', 'transformer']) if args.model=='all' else args.model ncoef=args.ncoef epochs=args.epochs batch_size=args.batch_size valid_batch_size=args.valid_batch_size n_workers=args.workers cuda=args.cuda train_hdf_file=args.train_hdf_file valid_hdf_file=args.valid_hdf_file checkpoint_path=args.checkpoint_path softmax=instru.var.OrderedDiscrete(['softmax', 'am_softmax']) delta=instru.var.OrderedDiscrete([True, False]) logdir=args.logdir instrum=instru.Instrumentation(lr, l2, max_gnorm, momentum, margin, lambda_, swap, latent_size, n_frames, model, ncoef, epochs, batch_size, valid_batch_size, n_workers, cuda, train_hdf_file, valid_hdf_file, checkpoint_path, softmax, delta, logdir) hp_optimizer=optimization.optimizerlib.RandomSearch(instrumentation=instrum, budget=args.budget, num_workers=args.hp_workers) with futures.ThreadPoolExecutor(max_workers=args.hp_workers) as executor: print(hp_optimizer.optimize(train, executor=executor))
{"hexsha": "d424d4edff1b259e4df801390cd4ee6b10bbd3e3", "size": 8618, "ext": "py", "lang": "Python", "max_stars_repo_path": "hp_search.py", "max_stars_repo_name": "joaomonteirof/multitask_asv", "max_stars_repo_head_hexsha": "16795628e4bcac8c7b83b6edeacee1a739495092", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2019-09-19T07:33:35.000Z", "max_stars_repo_stars_event_max_datetime": "2020-02-21T10:26:06.000Z", "max_issues_repo_path": "hp_search.py", "max_issues_repo_name": "joaomonteirof/multitask_asv", "max_issues_repo_head_hexsha": "16795628e4bcac8c7b83b6edeacee1a739495092", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "hp_search.py", "max_forks_repo_name": "joaomonteirof/multitask_asv", "max_forks_repo_head_hexsha": "16795628e4bcac8c7b83b6edeacee1a739495092", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2019-06-22T21:06:56.000Z", "max_forks_repo_forks_event_max_datetime": "2020-04-25T09:55:06.000Z", "avg_line_length": 59.4344827586, "max_line_length": 341, "alphanum_fraction": 0.7697841727, "include": true, "reason": "import numpy,import cupy", "num_tokens": 2457}
from gensim.models import Word2Vec import json import logging import numpy as np logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.INFO) word2vec_params = { 'sg': 1, # 0 : CBOW; 1 : skip-gram "size": 300, "alpha": 0.01, "min_alpha": 0.0005, 'window': 10, 'min_count': 1, 'seed': 1, "workers": 6, "negative": 0, "hs": 1, # 0: negative sampling, 1:hierarchical softmax 'compute_loss': True, 'iter': 50, 'cbow_mean': 0, } with open('./data/poetry.json', 'rb') as f: sentences = json.load(f) # sentences = sentences[:10] model = Word2Vec(**word2vec_params) model.build_vocab(sentences) trained_word_count, raw_word_count = model.train(sentences, compute_loss=True, total_examples=model.corpus_count, epochs=model.epochs) # +++++++++++++++++++++++++++++++++++++ # Convert to Pytorch Embedding # ------------------------------------- model = Word2Vec.load("skipgram_model_gensim.pk") def get_vocab_dict(model): word_idx = {key: j.index for key, j in model.wv.vocab.items()} idx_word = {i: k for k, i in word_idx.items()} return word_idx, idx_word word_idx, idx_word = get_vocab_dict(model) weights = model.wv.vectors embedding_weights = 'word2vec_skipgram_weights.pk' np.save(embedding_weights, weights) def rewrite_word_idx_jsons(): from multiprocessing.dummy import Pool as ThreadPool with open('./data/word_dictionary.json', 'w', encoding='utf-8') as f: json.dump(word_idx, f, ensure_ascii=False) # word idx frequency with open('./data/idx_word.json', 'w') as f: json.dump(idx_word, f) # convert the property text into number def word_to_idx(sentence): return [word_idx[i] for i in sentence] p = ThreadPool(4) poetry_idx = p.map(word_to_idx, sentences) p.close() p.join() with open('./data/poetry_idx.json', 'w', encoding='utf-8') as f: json.dump(poetry_idx, f, ensure_ascii=False) print("Done") with open("./data/word_frequency.json", 'rb') as f: word_freq = json.load(f) word_freq = {word_idx[k]:v for k,v in word_freq.items()} with open("./data/word_idx_frequency.json", 'w') as f: json.dump(word_freq, f) rewrite_word_idx_jsons() # +++++++++++++++++++++++++++++++++++++ # Check result # ------------------------------------- model_name = 'skipgram_model_gensim.pk' model.save(model_name) model.wv.similarity(',', "。") model.wv.similarity('西', "東") model.wv.similarity('人', "桃") model.wv.similarity('天', "地") model.wv.similarity('日', "月") model.wv.similarity('君', "人") model.wv.most_similar("東") model.wv.most_similar("人") model.wv.most_similar("汝") model.wv.most_similar("他") with open('./data/word_frequency.json', 'rb') as f: word_frequency = json.load(f) from collections import Counter most_freq = Counter(word_frequency) frequent_words = most_freq.most_common(150) import matplotlib.pyplot as plt from sklearn.manifold import TSNE import numpy as np plt.rcParams['font.sans-serif'] = ['SimHei'] plt.rcParams['axes.unicode_minus'] = False word_vect = np.array([model.wv.word_vec(i[0], use_norm=False) for i in frequent_words]) word_label = [i[0] for i in frequent_words] tsne = TSNE(n_components=2, random_state=1, verbose=2, n_iter=15000, perplexity=30) Y = tsne.fit_transform(word_vect) x_cord = Y[:, 0] y_cord = Y[:, 1] plt.scatter(x_cord, y_cord) for label, x, y in zip(word_label, x_cord, y_cord): plt.annotate(label, xy=(x, y), xytext=(0, 0), textcoords='offset points') # +++++++++++++++++++++++++++++++++++++ # Extra # ------------------------------------- # # losses = [] # learning_rate = 0.5 # step_size = (0.5 - 0.001) / 10 # # for i in range(1): # end_lr = learning_rate - step_size # trained_word_count, 
raw_word_count = model.train(sentences, compute_loss=True, # start_alpha=learning_rate, # end_alpha=end_lr, # total_examples=model.corpus_count, # epochs=1) # loss = model.get_latest_training_loss() # losses.append(loss) # print(i, loss, learning_rate) # learning_rate -= step_size # # #
{"hexsha": "8544f2616ff531a0a981609fd0a60ba3eb07934c", "size": 4575, "ext": "py", "lang": "Python", "max_stars_repo_path": "Negative Sampling and LSTM/word2vec_gensim.py", "max_stars_repo_name": "yu45020/Pytorch_Learning_Note", "max_stars_repo_head_hexsha": "b4b89b604fc2effd1ac16359f842b2f143019309", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 9, "max_stars_repo_stars_event_min_datetime": "2018-12-14T05:30:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-16T11:51:00.000Z", "max_issues_repo_path": "Negative Sampling and LSTM/word2vec_gensim.py", "max_issues_repo_name": "yu45020/Pytorch_Learning_Note", "max_issues_repo_head_hexsha": "b4b89b604fc2effd1ac16359f842b2f143019309", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2018-12-16T11:18:17.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-20T22:37:06.000Z", "max_forks_repo_path": "Negative Sampling and LSTM/word2vec_gensim.py", "max_forks_repo_name": "yu45020/Pytorch_Learning_Note", "max_forks_repo_head_hexsha": "b4b89b604fc2effd1ac16359f842b2f143019309", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2018-12-14T05:30:52.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-06T12:12:42.000Z", "avg_line_length": 29.9019607843, "max_line_length": 90, "alphanum_fraction": 0.5750819672, "include": true, "reason": "import numpy", "num_tokens": 1163}
# -*- coding: utf-8 -*- """ make connectome matrices for each measure, sorted by infomap modules Created on Sun Jun 21 15:58:15 2015 @author: poldrack """ import os import numpy import nilearn.plotting import scipy.stats from myconnectome.utils.load_parcel_data import load_parcel_data import matplotlib.pyplot as plt def r_to_z(r): # fisher transform z=0.5*numpy.log((1.0+r)/(1.0-r)) z[numpy.where(numpy.isinf(z))]=0 z[numpy.where(numpy.isnan(z))]=0 return z def z_to_r(z): # inverse transform return (numpy.exp(2.0*z) - 1)/(numpy.exp(2.0*z) + 1) basedir=os.environ['MYCONNECTOME_DIR'] def mk_sorted_adjmatrices(dtidensity=None): utr=numpy.triu_indices(630,1) parceldata=load_parcel_data(os.path.join(basedir,'parcellation/parcel_data.txt')) modules=numpy.zeros(len(parceldata)) for i in range(len(parceldata)): modules[i]=parceldata[i+1]['powernum'] module_idx=numpy.argsort(modules) breakpoints=[] netnames=[] for i in range(1,len(module_idx)): if not modules[module_idx[i]] == modules[module_idx[i-1]]: breakpoints.append(i) breakpoints.append(630) netnames=['none','DMN','V2','FP1','V1','DA','VA','Sal','CO','SM','FP2','MPar','ParOcc','subcort'] dtidata=numpy.loadtxt(os.path.join(basedir,'diffusion/tracksumm_distcorr.txt'),skiprows=1) dtidata=dtidata[:,1:] dtidata=dtidata+dtidata.T tmp=dtidata[module_idx,:] dtidata_sorted=tmp[:,module_idx] if not dtidensity: dtithresh=0 else: dtithresh=scipy.stats.scoreatpercentile(dtidata[utr],dtidensity) dtibin=dtidata>dtithresh dtidata_skyra=numpy.loadtxt(os.path.join(basedir,'diffusion/tracksumm_distcorr_skyra.txt'),skiprows=1) dtidata_skyra=dtidata_skyra[:,1:] dtidata_skyra=dtidata_skyra+dtidata_skyra.T tmp=dtidata_skyra[module_idx,:] dtidata_skyra_sorted=tmp[:,module_idx] if not dtidensity: dtithresh_skyra=0 else: dtithresh_skyra=scipy.stats.scoreatpercentile(dtidata_skyra[utr],dtidensity) dtibin_skyra=dtidata_skyra>0 bp=numpy.array(breakpoints) textlocs=numpy.mean(numpy.vstack((bp,numpy.hstack(([0],bp[:-1])))),0) plt.figure(figsize=[12,12]) plt.imshow(dtidata_sorted,origin='upper',cmap='gray',vmin=0,vmax=scipy.stats.scoreatpercentile(dtidata,90)) for b in breakpoints: plt.plot([0,630],[b,b],'r',linewidth=1.5) plt.plot([b,b],[0,630],'r',linewidth=1.5) plt.yticks(textlocs,netnames,fontsize=14) plt.xticks(textlocs,netnames,fontsize=14,rotation=90) plt.axis('image') plt.title('Diffusion tractography - HARDI',fontsize=18) plt.savefig(os.path.join(basedir,'diffusion/adjmtx_binarized_sorted_modules_HARDI.pdf')) plt.figure(figsize=[12,12]) plt.imshow(dtidata_skyra_sorted,origin='upper',cmap='gray',vmin=0,vmax=scipy.stats.scoreatpercentile(dtidata_skyra,90)) for b in breakpoints: plt.plot([0,630],[b,b],'r',linewidth=1.5) plt.plot([b,b],[0,630],'r',linewidth=1.5) plt.yticks(textlocs,netnames,fontsize=14) plt.xticks(textlocs,netnames,fontsize=14,rotation=90) plt.axis('image') plt.title('Diffusion tractography - Skyra',fontsize=18) plt.savefig(os.path.join(basedir,'diffusion/adjmtx_binarized_sorted_modules_Skyra.pdf')) dice=(2.0*numpy.sum(dtibin_skyra*dtibin))/float(numpy.sum(dtibin_skyra)+numpy.sum(dtibin)) print 'dice(HARDI,skyra)=',dice rsfmridata=numpy.load(os.path.join(basedir,'rsfmri/corrdata.npy')) rsfmridata=r_to_z(rsfmridata) meancorr_z=numpy.mean(rsfmridata,0) meancorr=z_to_r(meancorr_z) rsadj=numpy.zeros((630,630)) rsadj[utr]=meancorr rsadj=rsadj+rsadj.T tmp=rsadj[module_idx,:] rsadj_sorted=tmp[:,module_idx] plt.figure(figsize=[12,12]) plt.imshow(rsadj_sorted,origin='upper',cmap='seismic',vmin=-.8,vmax=0.8) for b in breakpoints: 
plt.plot([0,630],[b,b],'r',linewidth=1.5) plt.plot([b,b],[0,630],'r',linewidth=1.5) plt.yticks(textlocs,netnames,fontsize=14) plt.xticks(textlocs,netnames,fontsize=14,rotation=90) plt.axis('image') plt.title('Full correlation',fontsize=18) plt.colorbar(shrink=0.5) plt.savefig(os.path.join(basedir,'rsfmri/adjmtx_sorted_modules.pdf')) l2data=numpy.load(os.path.join(basedir,'rsfmri/l2_utr_data.npy')) l2data=r_to_z(l2data) meanl2data=numpy.mean(l2data,0) meanl2data=z_to_r(meanl2data) l2adj=numpy.zeros((630,630)) l2adj[utr]=meanl2data l2adj=l2adj+l2adj.T tmp=l2adj[module_idx,:] l2adj_sorted=tmp[:,module_idx] plt.figure(figsize=[12,12]) plt.imshow(l2adj_sorted,origin='upper',cmap='seismic',vmin=-.025,vmax=0.025) for b in breakpoints: plt.plot([0,630],[b,b],'r',linewidth=1.5) plt.plot([b,b],[0,630],'r',linewidth=1.5) plt.yticks(textlocs,netnames,fontsize=14) plt.xticks(textlocs,netnames,fontsize=14,rotation=90) plt.axis('image') plt.title('L2-regularized partial correlation',fontsize=18) plt.colorbar(shrink=0.5) plt.savefig(os.path.join(basedir,'rsfmri/pcorr_l2_adjmtx_sorted_modules.pdf')) taskdata=numpy.loadtxt(os.path.join(basedir,'taskfmri/task_connectome.txt')) tmp=taskdata[module_idx,:] taskdata_sorted=tmp[:,module_idx] plt.figure(figsize=[12,12]) plt.imshow(taskdata_sorted,origin='upper',cmap='seismic',vmin=-.8,vmax=0.8) for b in breakpoints: plt.plot([0,630],[b,b],'r',linewidth=1.5) plt.plot([b,b],[0,630],'r',linewidth=1.5) plt.yticks(textlocs,netnames,fontsize=14) plt.xticks(textlocs,netnames,fontsize=14,rotation=90) plt.axis('image') plt.title('Task correlation',fontsize=18) plt.colorbar(shrink=0.5) plt.savefig(os.path.join(basedir,'taskfmri/taskcorr_adjmtx_sorted_modules.pdf')) if __name__ == "__main__": mk_sorted_adjmatrices()
{"hexsha": "b5965a0816a8c8f5919e82fa7d1ac64313e4f918", "size": 5863, "ext": "py", "lang": "Python", "max_stars_repo_path": "myconnectome/rsfmri/mk_sorted_adjmatrices.py", "max_stars_repo_name": "poldrack/myconnectome", "max_stars_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 28, "max_stars_repo_stars_event_min_datetime": "2015-04-02T16:43:14.000Z", "max_stars_repo_stars_event_max_datetime": "2020-06-17T20:04:26.000Z", "max_issues_repo_path": "myconnectome/rsfmri/mk_sorted_adjmatrices.py", "max_issues_repo_name": "poldrack/myconnectome", "max_issues_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2015-05-19T02:57:22.000Z", "max_issues_repo_issues_event_max_datetime": "2017-03-17T17:36:16.000Z", "max_forks_repo_path": "myconnectome/rsfmri/mk_sorted_adjmatrices.py", "max_forks_repo_name": "poldrack/myconnectome", "max_forks_repo_head_hexsha": "201f414b3165894d6fe0be0677c8a58f6d161948", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 10, "max_forks_repo_forks_event_min_datetime": "2015-05-21T17:01:26.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-11T04:28:08.000Z", "avg_line_length": 34.2865497076, "max_line_length": 123, "alphanum_fraction": 0.6953777929, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1840}
# Copyright Hugh Perkins 2016, 2017 """ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import numpy as np import pyopencl as cl import os import math import pytest from test import test_common from test.test_common import offset_type basename = os.path.basename(__name__).split('.')[-1] print('basename', basename) def test_atomic_add_floats(context, q, float_data, float_data_gpu): ll_code = """ declare float @_Z9atomicAddIfET_PS0_S0_(float *, float) define void @mykernel(float* nocapture %data) #1 { %1 = call float @_Z9atomicAddIfET_PS0_S0_(float * %data, float 3.25) ret void } """ cl_code = test_common.ll_to_cl(ll_code, 'mykernel', 1) print('cl_code', cl_code) # try compiling it, just to be sure... float_data[0] = 0 kernel = test_common.build_kernel(context, cl_code, 'mykernel') cl.enqueue_copy(q, float_data_gpu, float_data) kernel(q, (128,), (32,), float_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(32)) from_gpu = np.copy(float_data) cl.enqueue_copy(q, from_gpu, float_data_gpu) q.finish() print('from_gpu[0]', from_gpu[0]) assert from_gpu[0] == 3.25 * 128 def test_atomic_cas_int(context, q, int_data, int_data_gpu): # atomicCAS api based on usage in Eigen unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h cu_code = """ __global__ void mykernel(int *data) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; int numAttempts = 0; while(true) { numAttempts += 1; int oldValue = data[0]; int newValue = oldValue + 1; int returnedOld = atomicCAS(data, oldValue, newValue); if(returnedOld == oldValue) { break; } } data[1 + gid] = numAttempts; } """ cl_code = test_common.cu_to_cl(cu_code, '_Z8mykernelPi', 1) print('cl_code', cl_code) int_data[0] = 0 kernel = test_common.build_kernel(context, cl_code, '_Z8mykernelPi') cl.enqueue_copy(q, int_data_gpu, int_data) num_blocks = 4 threads_per_block = 4 kernel(q, (num_blocks * threads_per_block,), (threads_per_block,), int_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(32)) from_gpu = np.copy(int_data) cl.enqueue_copy(q, from_gpu, int_data_gpu) q.finish() print('from_gpu', from_gpu[:17]) # check two things: # - final value of data[0] should equal num_blocks * threads_per_block # - the number of attempts should be unique, for each thread assert from_gpu[0] == num_blocks * threads_per_block seen_num_attempts = set() for i in range(num_blocks * threads_per_block): num_attempts = from_gpu[i + 1] if num_attempts in seen_num_attempts: raise Exception('already saw num_attempts %s' % num_attempts) seen_num_attempts.add(num_attempts) def test_atomic_cas_unsignedint(context, q, int_data, int_data_gpu): # atomicCAS api based on usage in Eigen unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h cu_code = """ __global__ void mykernel(unsigned int *data) { int tid = threadIdx.x; int gid = blockIdx.x * blockDim.x + threadIdx.x; int numAttempts = 0; while(true) { numAttempts += 1; unsigned int oldValue = data[0]; unsigned int newValue = oldValue + 1; unsigned int returnedOld = atomicCAS(data, oldValue, newValue); if(returnedOld == oldValue) { break; } } 
data[1 + gid] = numAttempts; } """ cl_code = test_common.cu_to_cl(cu_code, '_Z8mykernelPj', 1) print('cl_code', cl_code) int_data[0] = 0 kernel = test_common.build_kernel(context, cl_code, '_Z8mykernelPj') cl.enqueue_copy(q, int_data_gpu, int_data) num_blocks = 4 threads_per_block = 4 kernel(q, (num_blocks * threads_per_block,), (threads_per_block,), int_data_gpu, offset_type(0), offset_type(0), cl.LocalMemory(32)) from_gpu = np.copy(int_data) cl.enqueue_copy(q, from_gpu, int_data_gpu) q.finish() print('from_gpu', from_gpu[:17]) # check two things: # - final value of data[0] should equal num_blocks * threads_per_block # - the number of attempts should be unique, for each thread assert from_gpu[0] == num_blocks * threads_per_block seen_num_attempts = set() for i in range(num_blocks * threads_per_block): num_attempts = from_gpu[i + 1] if num_attempts in seen_num_attempts: raise Exception('already saw num_attempts %s' % num_attempts) seen_num_attempts.add(num_attempts) def test_atomic_inc_int(context, q, int_data, int_data_gpu): # atomicCAS api based on usage in Eigen unsupported/Eigen/CXX11/src/Tensor/TensorReductionCuda.h cu_code = """ __global__ void mykernel(int *data, int limit) { atomicInc((unsigned int *)data, limit); } """ cl_code = test_common.cu_to_cl(cu_code, '_Z8mykernelPii', 1) print('cl_code', cl_code) int_data[0] = 0 int_data[1] = 0 kernel = test_common.build_kernel(context, cl_code, '_Z8mykernelPii') cl.enqueue_copy(q, int_data_gpu, int_data) num_blocks = 4 threads_per_block = 4 modulus = 11 kernel(q, (num_blocks * threads_per_block,), (threads_per_block,), int_data_gpu, offset_type(0), offset_type(0), np.int32(256), cl.LocalMemory(32)) kernel(q, (num_blocks * threads_per_block,), (threads_per_block,), int_data_gpu, offset_type(0), offset_type(4), np.int32(modulus - 1), cl.LocalMemory(32)) from_gpu = np.copy(int_data) cl.enqueue_copy(q, from_gpu, int_data_gpu) q.finish() print('from_gpu', from_gpu[:2]) assert from_gpu[0] == num_blocks * threads_per_block assert from_gpu[1] == num_blocks * threads_per_block % modulus
{"hexsha": "3524dfd3a8afde17d10fdcaf1746f8c5a9ef5c64", "size": 6178, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/test_atomic.py", "max_stars_repo_name": "dendisuhubdy/coriander", "max_stars_repo_head_hexsha": "7df182981e5c4a8e043fea25d272d025a953f06d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 644, "max_stars_repo_stars_event_min_datetime": "2017-05-21T05:25:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T04:18:14.000Z", "max_issues_repo_path": "test/test_atomic.py", "max_issues_repo_name": "hughperkins/cuda-ir-to-opencl", "max_issues_repo_head_hexsha": "7c6b65bc08a25a6bce21efe7b86be8fa985597af", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 82, "max_issues_repo_issues_event_min_datetime": "2017-05-21T15:19:24.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-30T01:41:44.000Z", "max_forks_repo_path": "test/test_atomic.py", "max_forks_repo_name": "hughperkins/cuda-ir-to-opencl", "max_forks_repo_head_hexsha": "7c6b65bc08a25a6bce21efe7b86be8fa985597af", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 88, "max_forks_repo_forks_event_min_datetime": "2017-05-21T01:31:16.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-31T09:28:17.000Z", "avg_line_length": 36.5562130178, "max_line_length": 159, "alphanum_fraction": 0.6942376174, "include": true, "reason": "import numpy", "num_tokens": 1657}
using Random Random.seed!(1974) println("Seed 1974: ",rand(),"\t", rand(), "\t", rand()) Random.seed!(1975) println("Seed 1975: ",rand(),"\t", rand(), "\t", rand()) Random.seed!(1974) println("Seed 1974: ",rand(),"\t", rand(), "\t", rand())
{"hexsha": "f7896349fb38c51b5fde5748caaf8f2b6ad9a445", "size": 242, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "1_chapter/seedExample.jl", "max_stars_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_stars_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 988, "max_stars_repo_stars_event_min_datetime": "2018-06-21T00:44:33.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-31T01:37:47.000Z", "max_issues_repo_path": "1_chapter/seedExample.jl", "max_issues_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_issues_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 41, "max_issues_repo_issues_event_min_datetime": "2019-02-20T05:06:27.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-23T16:53:08.000Z", "max_forks_repo_path": "1_chapter/seedExample.jl", "max_forks_repo_name": "Yoshinobu-Ishizaki/StatsWithJuliaBook", "max_forks_repo_head_hexsha": "4c704e96d87b91e680122a6b6fa2d2083c70ea88", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 264, "max_forks_repo_forks_event_min_datetime": "2018-07-31T03:11:29.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-26T16:12:13.000Z", "avg_line_length": 26.8888888889, "max_line_length": 56, "alphanum_fraction": 0.5785123967, "num_tokens": 81}
<p align="center"> </p> ## Interactive Spatial Estimation with Strings of Data Demonstration ### Michael Pyrcz, Associate Professor, University of Texas at Austin ##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) ### The Interactive Workflow Here's a simple workflow for calculating and visualizing the simple kriging weights for a string of data. #### Spatial Estimation Consider the case of making an estimate at some unsampled location, $𝑧(\bf{u}_0)$, where $z$ is the property of interest (e.g. porosity etc.) and $𝐮_0$ is a location vector describing the unsampled location. How would you do this given data, $𝑧(\bf{𝐮}_1)$, $𝑧(\bf{𝐮}_2)$, and $𝑧(\bf{𝐮}_3)$? It would be natural to use a set of linear weights to formulate the estimator given the available data. \begin{equation} z^{*}(\bf{u}) = \sum^{n}_{\alpha = 1} \lambda_{\alpha} z(\bf{u}_{\alpha}) \end{equation} We could add an unbiasedness constraint to impose the sum of the weights equal to one. What we will do is assign the remainder of the weight (one minus the sum of weights) to the global average; therefore, if we have no informative data we will estimate with the global average of the property of interest. \begin{equation} z^{*}(\bf{u}) = \sum^{n}_{\alpha = 1} \lambda_{\alpha} z(\bf{u}_{\alpha}) + \left(1-\sum^{n}_{\alpha = 1} \lambda_{\alpha} \right) \overline{z} \end{equation} We will make a stationarity assumption, so let's assume that we are working with residuals, $y$. \begin{equation} y^{*}(\bf{u}) = z^{*}(\bf{u}) - \overline{z}(\bf{u}) \end{equation} If we substitute this form into our estimator the estimator simplifies, since the mean of the residual is zero. \begin{equation} y^{*}(\bf{u}) = \sum^{n}_{\alpha = 1} \lambda_{\alpha} y(\bf{u}_{\alpha}) \end{equation} while satisfying the unbaisedness constraint. #### Kriging Now the next question is what weights should we use? We could use equal weighting, $\lambda = \frac{1}{n}$, and the estimator would be the average of the local data applied for the spatial estimate. This would not be very informative. We could assign weights considering the spatial context of the data and the estimate: * **spatial continuity** as quantified by the variogram (and covariance function) * **redundancy** the degree of spatial continuity between all of the available data with themselves * **closeness** the degree of spatial continuity between the avaiable data and the estimation location The kriging approach accomplishes this, calculating the best linear unbiased weights for the local data to estimate at the unknown location. The derivation of the kriging system and the resulting linear set of equations is available in the lecture notes. Furthermore kriging provides a measure of the accuracy of the estimate! This is the kriging estimation variance (sometimes just called the kriging variance). \begin{equation} \sigma^{2}_{E}(\bf{u}) = C(0) - \sum^{n}_{\alpha = 1} \lambda_{\alpha} C(\bf{u}_0 - \bf{u}_{\alpha}) \end{equation} What is 'best' about this estimate? Kriging estimates are best in that they minimize the above estimation variance. 
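Before moving on, here is a tiny worked example of the simple kriging estimator above with made-up numbers: the weights would come from solving the kriging system (shown later in this notebook), and whatever weight is left over is assigned to the global mean.

```python
# A small numerical illustration of the simple kriging estimator (illustrative values only).
import numpy as np

z_data = np.array([0.10, 0.14, 0.08])    # property values at the three data locations
weights = np.array([0.45, 0.25, 0.10])   # simple kriging weights
z_mean = 0.12                            # global (stationary) mean

z_star = np.sum(weights * z_data) + (1.0 - np.sum(weights)) * z_mean
print(z_star)                            # 0.112, pulled toward the mean by the unassigned 0.20 weight
```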
For lectures on kriging, start here - [spatial estimation](https://www.youtube.com/watch?v=CVkmuwF8cJ8&list=PLG19vXLQHvSB-D4XKYieEku9GQMQyAzjJ&index=42&t=808s).

#### Properties of Kriging

Here are some important properties of kriging:

* **Exact interpolator** - kriging estimates with the data values at the data locations
* **Kriging variance** can be calculated before getting the sample information, as the kriging estimation variance is not dependent on the values of the data nor the kriging estimate, i.e. the kriging estimator is homoscedastic.
* **Spatial context** - in addition to the statements on spatial continuity, closeness and redundancy, kriging accounts for the configuration of the data and the structural continuity of the variable being estimated.
* **Scale** - kriging may be generalized to account for the support volume of the data and estimate. We will cover this later.
* **Multivariate** - kriging may be generalized to account for multiple secondary data in the spatial estimate with the cokriging system. We will cover this later.
* **Smoothing effect** of kriging can be forecast. We will use this to build stochastic simulations later.

#### Balancing Closeness and Redundancy

The simple kriging system balances:

* **closeness** - spatial correlation between the data and the unknown location
* **redundancy** - spatial correlation between the data (all possible pairs)

This demonstration will illustrate this trade-off by observing the weights assigned to a string of data as you interactively change the:

* **data density** - spacing of the data on the string
* **spatial continuity** - through the anisotropic variogram model (single structure with nugget effect)
* **spatial estimate location** - relative to the string of data

Check out the behavior of this system and see if you can cause a string effect!

* **string effect** - the majority of the weight is assigned to the peripheral data along a string due to their perceived lower redundancy. This is due to the assumption of kriging that the estimation domain does not have a boundary (i.e. it is embedded in an infinite domain).
* observe this with high data redundancy (large along-string continuity relative to data spacing)

#### Spatial Continuity

**Spatial Continuity** is the correlation between values over distance.

* No spatial continuity – no correlation between values over distance, random values at each location in space regardless of separation distance.
* Homogeneous phenomena have perfect spatial continuity; since all values are the same (or very similar) they are correlated.

We need a statistic to quantify spatial continuity! A convenient method is the semivariogram.

#### Semivariogram

Recall that the semivariogram is a function of difference over distance.

* The expected (average) squared difference between values separated by a lag distance vector (distance and direction), $h$:

\begin{equation}
\gamma(\bf{h}) = \frac{1}{2 N(\bf{h})} \sum^{N(\bf{h})}_{\alpha=1} (z(\bf{u}_\alpha) - z(\bf{u}_\alpha + \bf{h}))^2
\end{equation}

where $z(\bf{u}_\alpha)$ and $z(\bf{u}_\alpha + \bf{h})$ are the spatial sample values at the tail and head locations of the lag vector, respectively.

* Calculated over a suite of lag distances to obtain a continuous function.
* The $\frac{1}{2}$ term converts a variogram into a semivariogram, but in practice the term variogram is used instead of semivariogram.
* We prefer the semivariogram because it relates directly to the covariance function, $C_x(\bf{h})$ and univariate variance, $\sigma^2_x$: \begin{equation} C_x(\bf{h}) = \sigma^2_x - \gamma(\bf{h}) \end{equation} Note the correlogram is related to the covariance function as: \begin{equation} \rho_x(\bf{h}) = \frac{C_x(\bf{h})}{\sigma^2_x} \end{equation} The correlogram provides of function of the $\bf{h}-\bf{h}$ scatter plot correlation vs. lag offset $\bf{h}$. \begin{equation} -1.0 \le \rho_x(\bf{h}) \le 1.0 \end{equation} For lectures on the semivariogram, start here - [spatial continuity lecture](https://www.youtube.com/watch?v=j0I5SGFm00c&list=PLG19vXLQHvSB-D4XKYieEku9GQMQyAzjJ&index=32&t=516s). #### Objective In the PGE 383: Stochastic Subsurface Modeling class I want to provide hands-on experience with building subsurface modeling workflows. Python provides an excellent vehicle to accomplish this. I have coded a package called GeostatsPy with GSLIB: Geostatistical Library (Deutsch and Journel, 1998) functionality that provides basic building blocks for building subsurface modeling workflows. The objective is to remove the hurdles of subsurface modeling workflow construction by providing building blocks and sufficient examples. This is not a coding class per se, but we need the ability to 'script' workflows working with numerical methods. #### Getting Started Here's the steps to get setup in Python with the GeostatsPy package: 1. Install Anaconda 3 on your machine (https://www.anaconda.com/download/). 2. From Anaconda Navigator (within Anaconda3 group), go to the environment tab, click on base (root) green arrow and open a terminal. 3. In the terminal type: pip install geostatspy. 4. Open Jupyter and in the top block get started by copy and pasting the code block below from this Jupyter Notebook to start using the geostatspy functionality. No external data is required for this demonstration. #### Load the required libraries The following code loads the required libraries. ```python import geostatspy.GSLIB as GSLIB # GSLIB utilies, visualization and wrapper import geostatspy.geostats as geostats # GSLIB methods convert to Python ``` We will also need some standard packages. These should have been installed with Anaconda 3. ```python %matplotlib inline import os # to set current working directory import sys # supress output to screen for interactive variogram modeling import io import numpy as np # arrays and matrix math import pandas as pd # DataFrames import matplotlib.pyplot as plt # plotting from matplotlib.pyplot import cm # color maps from matplotlib.patches import Ellipse # plot an ellipse import matplotlib.lines as lines import math # sqrt operator import random # random simulation locations from copy import copy # copy a colormap from scipy.stats import norm from ipywidgets import interactive # widgets and interactivity from ipywidgets import widgets from ipywidgets import Layout from ipywidgets import Label from ipywidgets import VBox, HBox from scipy.stats import norm # Gaussian distribution ``` If you get a package import error, you may have to first install some of these packages. This can usually be accomplished by opening up a command window on Windows and then typing 'python -m pip install [package-name]'. More assistance is available with the respective package docs. 
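Before building the kriging function in the next section, here is a quick, self-contained check of the semivariogram / covariance relation described above, $C_x(\bf{h}) = \sigma^2_x - \gamma(\bf{h})$. It uses a synthetic, spatially correlated 1-D series rather than any project data, so the numbers are illustrative only.

```python
# Experimental check of gamma(h) and C(h) = sigma^2 - gamma(h) on a synthetic 1-D series.
import numpy as np

np.random.seed(73073)
z = np.convolve(np.random.normal(size=1100), np.ones(10) / 10, mode='valid')  # smoothed noise -> correlated series
z = (z - z.mean()) / z.std()                                                  # standardize so sigma^2 = 1

lag = 5
head, tail = z[lag:], z[:-lag]
gamma_h = 0.5 * np.mean((head - tail) ** 2)   # experimental semivariogram at this lag
cov_h = np.mean(head * tail)                  # experimental covariance at this lag (mean ~ 0)

print(gamma_h, cov_h, 1.0 - gamma_h)          # C(h) is approximately sigma^2 - gamma(h)
```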
#### Simple, Simple Kriging Function Let's write a fast Python function to take data points and unknown location and provide the: * **simple kriging estimate** * **simple kriging variance / estimation variance** * **simple kriging weights** This provides a fast method for small datasets, with less parameters (no search parameters) and the ability to see the simple kriging weights. * we use it here for fast, flexible application of sequential simulation * the method will not work with only ones simulation location so we send 2 and only use the first result (the 2nd is always a dummy location in the workflow below. ```python def simple_simple_krige(df,xcol,ycol,vcol,dfl,xlcol,ylcol,vario,skmean): # load the variogram nst = vario['nst']; pmx = 9999.9 cc = np.zeros(nst); aa = np.zeros(nst); it = np.zeros(nst) ang = np.zeros(nst); anis = np.zeros(nst) nug = vario['nug']; sill = nug cc[0] = vario['cc1']; sill = sill + cc[0] it[0] = vario['it1']; ang[0] = vario['azi1']; aa[0] = vario['hmaj1']; anis[0] = vario['hmin1']/vario['hmaj1']; if nst == 2: cc[1] = vario['cc2']; sill = sill + cc[1] it[1] = vario['it2']; ang[1] = vario['azi2']; aa[1] = vario['hmaj2']; anis[1] = vario['hmin2']/vario['hmaj2']; # set up the required matrices rotmat, maxcov = geostats.setup_rotmat(nug,nst,it,cc,ang,pmx) ndata = len(df); a = np.zeros([ndata,ndata]); r = np.zeros(ndata); s = np.zeros(ndata); rr = np.zeros(ndata) nest = len(dfl) est = np.zeros(nest); var = np.full(nest,sill); weights = np.zeros([nest,ndata]) # Make and solve the kriging matrix, calculate the kriging estimate and variance for iest in range(0,nest): for idata in range(0,ndata): for jdata in range(0,ndata): a[idata,jdata] = geostats.cova2(df[xcol].values[idata],df[ycol].values[idata],df[xcol].values[jdata],df[ycol].values[jdata], nst,nug,pmx,cc,aa,it,ang,anis,rotmat,maxcov) r[idata] = geostats.cova2(df[xcol].values[idata],df[ycol].values[idata],dfl[xlcol].values[iest],dfl[ylcol].values[iest], nst,nug,pmx,cc,aa,it,ang,anis,rotmat,maxcov) rr[idata] = r[idata] s = geostats.ksol_numpy(ndata,a,r) sumw = 0.0 for idata in range(0,ndata): sumw = sumw + s[idata] weights[iest,idata] = s[idata] est[iest] = est[iest] + s[idata]*df[vcol].values[idata] var[iest] = var[iest] - s[idata]*rr[idata] est[iest] = est[iest] + (1.0-sumw)*skmean return est,var,weights ``` Now let's set up our dash board. 
```python import warnings; warnings.simplefilter('ignore') # dashboard: number of simulation locations and variogram parameters style = {'description_width': 'initial'} l = widgets.Text(value=' Spatial Estimation with Simple Kriging: Closeness, Redundancy Demonstration, Michael Pyrcz, Associate Professor, The University of Texas at Austin',layout=Layout(width='950px', height='30px')) spacing = widgets.FloatSlider(min = 0.1, max = 5, value = 1, step =0.1, description = 'data spacing',orientation='vertical', layout=Layout(width='100px', height='200px'),continuous_update=False) spacing.style.handle_color = 'gray' nug = widgets.FloatSlider(min = 0, max = 1.0, value = 0.0, step = 0.1, description = 'nugget effect',orientation='vertical', layout=Layout(width='100px', height='200px'),continuous_update=False) nug.style.handle_color = 'gray' it1 = widgets.Dropdown(options=['Spherical', 'Exponential', 'Gaussian'],value='Spherical', description='Type1:',disabled=False,layout=Layout(width='180px', height='30px'), style=style,continuous_update=False) dip = widgets.FloatSlider(min=-90, max = 90, value = 0, step = 0.10, description = 'continuity dip', orientation='vertical',layout=Layout(width='100px', height='200px'),continuous_update=False) dip.style.handle_color = 'gray' hmaj1 = widgets.FloatSlider(min=0.01, max = 10000.0, value = 5000.0, step = 100.0, description = 'horizontal range', orientation='vertical',layout=Layout(width='100px', height='200px'),continuous_update=False) hmaj1.style.handle_color = 'gray' hmin1 = widgets.FloatSlider(min = 0.01, max = 200.0, value = 3.0, step = 0.1, description = 'vertical range', orientation='vertical',layout=Layout(width='100px', height='200px'),continuous_update=False) hmin1.style.handle_color = 'gray' uikvar = widgets.HBox([spacing,nug,it1,dip,hmaj1,hmin1],) # dashboard: data locations x1 = widgets.FloatSlider(min=0.0, max = 1000.0, value = 0.0, step = 4.0, description = 'x1',orientation='horizontal', layout=Layout(width='180px', height='30px'),readout_format = '.0f',style=style,continuous_update=False) x1.style.handle_color = 'blue' z1 = widgets.FloatSlider(min=0.0, max = 10.0, value = 5.0, step = 1.0, description = 'z1',orientation='vertical', layout=Layout(width='90px', height='180px'),readout_format = '.0f',style=style,continuous_update=False) z1.style.handle_color = 'blue' uik1 = widgets.VBox([x1,z1],) uipars = widgets.HBox([uikvar,uik1],) uik = widgets.VBox([l,uipars],) def convert_type(it): if it == 'Spherical': return 1 elif it == 'Exponential': return 2 else: return 3 def f_make_krige(spacing,nug,it1,dip,hmaj1,hmin1,x1,z1): # function to take parameters, make sample and plot text_trap = io.StringIO() # suppress all text function output to dashboard to avoid clutter sys.stdout = text_trap cmap = cm.inferno np.random.seed(seed = 73073) # ensure same results for all runs it1 = convert_type(it1) nst = 1; xlag = 10; nlag = int(hmaj1/xlag); c1 = 1.0-nug; azi = 90.0+dip vario = GSLIB.make_variogram(nug,nst,it1,c1,azi,hmaj1,hmin1) # make model object index_maj,h_maj,gam_maj,cov_maj,ro_maj = geostats.vmodel(nlag,xlag,azi,vario) # project the model in the major azimuth # project the model in the 135 azimuth index_min,h_min,gam_min,cov_min,ro_min = geostats.vmodel(nlag,xlag,azi+90.0,vario) # project the model in the minor azimuth seed = 73073 # make static hard data dataframe and hard code the data values number = int(10.0/spacing + 1) z = np.linspace(0,10,number) x = np.zeros(len(z)); value = np.zeros(len(z)) df = pd.DataFrame({'X':x,'Z':z,'Value':value}) ndata 
= len(df); skmean = np.average(df['Value'].values) # make the estimate locations (we will need some dummy locations, the code doesn't work with 1 location) dfl = pd.DataFrame({'X':[x1,-99999,99999],'Z':[z1,-99999,99999], 'Value':[0.0,-99999,-99999]},dtype=np.single) sk_est_temp, sk_var_temp, sk_weights_temp = simple_simple_krige(df,'X','Z','Value',dfl,'X','Z',vario,skmean=skmean) sk_est = sk_est_temp[0] sk_var = sk_var_temp[0] df['weights'] = sk_weights_temp[0,:] # get the data weights for the first unknown location, the others are dummy locations print('weights'); print(sk_weights_temp[0,:]) # plot the variogram model xlag = 10.0; nlag = int(hmaj1/xlag) plt.subplot(1,3,1) plt.plot([0,hmaj1*1.5],[1.0,1.0],color = 'black') plt.plot(h_maj,gam_maj,color = 'black',label = 'Horizontal at Dip ' + str(azi-90)) plt.plot(h_min,gam_min,color = 'gray',label = 'Vertical, Orthogonal to Dip') deltas = [75.0,60.0,45.0,30.0,15.0]; ndelta = len(deltas); hd = np.zeros(ndelta); gamd = np.zeros(ndelta); color=iter(cm.plasma(np.linspace(0,1,ndelta))) for delta in deltas: index,hd,gamd,cov,ro = geostats.vmodel(nlag,xlag,dip+delta,vario); c=next(color) plt.plot(hd,gamd,color = c,label = 'Dip ' + str(round(dip+delta-90.0,2))) plt.xlabel(r'Lag Distance $\bf(h)$, (m)') plt.ylabel(r'$\gamma \bf(h)$') plt.title('Interpolated Directional Variogram Models') plt.xlim([0,hmaj1*1.5]) plt.ylim([0,1.4]) plt.legend(loc='lower right') max_weight = np.max(df['weights'].values) # plot the data and simulated values on a scatter plot plt.subplot(1,3,2) # plot the data colored by their weights my_plot = plt.scatter(df['X'],df['Z'],marker='o', c = df['weights'], cmap = cmap, vmin = 0.0, vmax = 1.2, linewidths = 0.01, edgecolors = 'black', s = 500/number,label = 'Original Data', alpha = 0.6) # make a custom line chart on the scatter plot for the weights pos = df['X'].values + df['weights'].values/1.0 * 500 print('pos'); print(pos) line = lines.Line2D([0, 500], [0, 0], color = 'grey') line2 = lines.Line2D([500, 500], [0, -0.2], color = 'grey') line3 = lines.Line2D([400, 400], [0, -0.2], color = 'grey') line4 = lines.Line2D([300, 300], [0, -0.2], color = 'grey') line5 = lines.Line2D([200, 200], [0, -0.2], color = 'grey') line6 = lines.Line2D([100, 100], [0, -0.2], color = 'grey') ax = plt.gca() ax.add_line(line); ax.add_line(line2); ax.add_line(line3); ax.add_line(line4); ax.add_line(line5); ax.add_line(line6) ax.annotate('Weights', (220.0,-1.8)); ax.annotate('1.0', (480.0,-0.9)) ax.annotate('0.8', (380.0,-0.9)); ax.annotate('0.6', (280.0,-0.9)); ax.annotate('0.4', (180.0,-0.9)); ax.annotate('0.2', (80.0,-0.9)) plt.plot(pos,df['Z'].values,'-',color = 'red',alpha = 0.6) plt.scatter(x1,z1,marker='^', color = 'red', alpha = 0.6,edgecolors = 'black') ax.annotate('?', (x1+10,z1+0.10)) # add labels and colorbar plt.xlabel('X(m)'); plt.ylabel('Z(m)') plt.title('Spatial String of Data with Weights and Unknown Location') plt.xlim([-10,1000]) plt.ylim([-2,12]) cbar = plt.colorbar(my_plot, orientation="vertical", ticks=np.linspace(0, 1.2, 13)) cbar.set_label('Simple Kriging Weights', rotation=270, labelpad=20) # add continuity ranges ellipse = Ellipse((x1, z1),width=hmin1*2.0,height=hmaj1*2.0,angle = 360-azi,facecolor='gray',alpha = 0.1) ax.add_patch(ellipse) plt.subplots_adjust(left=0.0, bottom=0.0, right=3.2, top=0.9, wspace=0.3, hspace=0.3) plt.show() # connect the function to make the samples and plot to the widgets interactive_plot = widgets.interactive_output(f_make_krige, {'spacing':spacing,'nug':nug, 'it1':it1, 'dip':dip, 'hmaj1':hmaj1, 
'hmin1':hmin1, 'x1':x1, 'z1':z1,}) #interactive_plot.clear_output(wait = True) # reduce flickering by delaying plot updating ``` ### Interactive Kriging String Effect * select the variogram model and the data locations and observe data weights along a string of data #### Michael Pyrcz, Associate Professor, University of Texas at Austin ##### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) | [GeostatsPy](https://github.com/GeostatsGuy/GeostatsPy) ### The Inputs Select the variogram model and the data locations: * **nug**: nugget effect * **c1**: contributions of the sill * **hmaj1** / **hmin1**: range in the major and minor direction * **(x1, y1)**: spatial data location ```python display(uik, interactive_plot) # display the interactive plot ``` VBox(children=(Text(value=' Spatial Estimation with Simple Kriging: Closeness, Redundancy Demonstratio… Output(outputs=({'output_type': 'display_data', 'data': {'text/plain': '<Figure size 432x288 with 3 Axes>', 'i… #### Comments This was an interactive demonstration of simple kriging with strings of data. Much more could be done, I have other demonstrations on the basics of working with DataFrames, ndarrays, univariate statistics, plotting data, declustering, data transformations and many other workflows available at https://github.com/GeostatsGuy/PythonNumericalDemos and https://github.com/GeostatsGuy/GeostatsPy. #### The Author: ### Michael Pyrcz, Associate Professor, University of Texas at Austin *Novel Data Analytics, Geostatistics and Machine Learning Subsurface Solutions* With over 17 years of experience in subsurface consulting, research and development, Michael has returned to academia driven by his passion for teaching and enthusiasm for enhancing engineers' and geoscientists' impact in subsurface resource development. For more about Michael check out these links: #### [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1) #### Want to Work Together? I hope this content is helpful to those that want to learn more about subsurface modeling, data analytics and machine learning. Students and working professionals are welcome to participate. * Want to invite me to visit your company for training, mentoring, project review, workflow design and / or consulting? I'd be happy to drop by and work with you! * Interested in partnering, supporting my graduate student research or my Subsurface Data Analytics and Machine Learning consortium (co-PIs including Profs. Foster, Torres-Verdin and van Oort)? My research combines data analytics, stochastic modeling and machine learning theory with practice to develop novel methods and workflows to add value. We are solving challenging subsurface problems! * I can be reached at [email protected]. 
I'm always happy to discuss, *Michael* Michael Pyrcz, Ph.D., P.Eng. Associate Professor The Hildebrand Department of Petroleum and Geosystems Engineering, Bureau of Economic Geology, The Jackson School of Geosciences, The University of Texas at Austin #### More Resources Available at: [Twitter](https://twitter.com/geostatsguy) | [GitHub](https://github.com/GeostatsGuy) | [Website](http://michaelpyrcz.com) | [GoogleScholar](https://scholar.google.com/citations?user=QVZ20eQAAAAJ&hl=en&oi=ao) | [Book](https://www.amazon.com/Geostatistical-Reservoir-Modeling-Michael-Pyrcz/dp/0199731446) | [YouTube](https://www.youtube.com/channel/UCLqEr-xV-ceHdXXXrTId5ig) | [LinkedIn](https://www.linkedin.com/in/michael-pyrcz-61a648a1)
{"hexsha": "460f96e1b813815b68c01c7150d2777ce1c95c74", "size": 31983, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Interactive_String_Effect.ipynb", "max_stars_repo_name": "AndrewAnnex/PythonNumericalDemos", "max_stars_repo_head_hexsha": "11a7dec09bf08527b358fa95119811b6f73023b5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-23T16:03:43.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-23T16:03:43.000Z", "max_issues_repo_path": "Interactive_String_Effect.ipynb", "max_issues_repo_name": "AndrewAnnex/PythonNumericalDemos", "max_issues_repo_head_hexsha": "11a7dec09bf08527b358fa95119811b6f73023b5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Interactive_String_Effect.ipynb", "max_forks_repo_name": "AndrewAnnex/PythonNumericalDemos", "max_forks_repo_head_hexsha": "11a7dec09bf08527b358fa95119811b6f73023b5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 55.2383419689, "max_line_length": 512, "alphanum_fraction": 0.606290842, "converted": true, "num_tokens": 7098}
function flg = checkDictUse % Since MATLAB 7.3(2006A) and 7.4(2006B) doesnot support dicom dictionary % this function checks the matlab version and returns the flag to allow the % use of dictionary. % % DK % % Copyright 2010, Joseph O. Deasy, on behalf of the CERR development team. % % This file is part of The Computational Environment for Radiotherapy Research (CERR). % % CERR development has been led by: Aditya Apte, Divya Khullar, James Alaly, and Joseph O. Deasy. % % CERR has been financially supported by the US National Institutes of Health under multiple grants. % % CERR is distributed under the terms of the Lesser GNU Public License. % % This version of CERR is free software: you can redistribute it and/or modify % it under the terms of the GNU General Public License as published by % the Free Software Foundation, either version 3 of the License, or % (at your option) any later version. % % CERR is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; % without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. % See the GNU General Public License for more details. % % You should have received a copy of the GNU General Public License % along with CERR. If not, see <http://www.gnu.org/licenses/>. % set flag to 1 so as to always use Dictionary flg = 1; mathVer = version; if str2num(mathVer(1:3)) > 7.3 flg = 0; end return
{"author": "cerr", "repo": "CERR", "sha": "d320754abad9dcb78508ab69f33ae9f644202114", "save_path": "github-repos/MATLAB/cerr-CERR", "path": "github-repos/MATLAB/cerr-CERR/CERR-d320754abad9dcb78508ab69f33ae9f644202114/CERR_core/Importing/checkDictUse.m"}
#This software is Copyright 2012 The Regents of the University of California. All Rights Reserved. #Permission to use, copy, modify, and distribute this software and its documentation for educational, research and non-profit purposes for non-profit institutions, without fee, and without a written agreement is hereby granted, provided that the above copyright notice, this paragraph and the following three paragraphs appear in all copies. #Permission to make commercial use of this software may be obtained by contacting: #Technology Transfer Office #9500 Gilman Drive, Mail Code 0910 #University of California #La Jolla, CA 92093-0910 #(858) 534-5815 #[email protected] #This software program and documentation are copyrighted by The Regents of the University of California. The software program and documentation are supplied "as is", without any accompanying services from The Regents. The Regents does not warrant that the operation of the program will be uninterrupted or error-free. The end-user understands that the program was developed for research purposes and is advised not to rely exclusively on the program for any reason. #IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO #ANY PARTY FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR #CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING #OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, #EVEN IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF #THE POSSIBILITY OF SUCH DAMAGE. THE UNIVERSITY OF #CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, #INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF #MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. #THE SOFTWARE PROVIDED HEREUNDER IS ON AN "AS IS" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATIONS TO #PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR #MODIFICATIONS. # Experimental code for neural network. # Currently not using this. 
from numpy import *
from scipy import ndimage
from data_viewer import borderWidthForFeatures


class NeuralNetwork():
    """Neural network class"""

    def __init__(self, inputVolume):
        self.inputVolume = inputVolume
        self.outputVolume = zeros(inputVolume.shape, dtype=float32)
        self.weights = ones((11, 11, 1), dtype=float32)

    def update(self):
        """Update network output: normalized convolution followed by a tanh nonlinearity."""
        sh = self.weights.shape
        ndimage.convolve(self.inputVolume,
                         self.weights / float(sh[0] * sh[1] * sh[2]),
                         output=self.outputVolume)
        self.outputVolume[:] = tanh(self.outputVolume)[:]

    def getOutput(self):
        """Get network output"""
        return self.outputVolume


class NeuralNetworkLearner:

    def __init__(self, inputVolume, outputVolume):
        self.network = NeuralNetwork(inputVolume)
        self.trainingOutputVolume = outputVolume
        self.updateCount = 0

    def error(self):
        """Calculate the absolute error against the training output, ignoring the border region."""
        total = 0
        self.network.update()
        v = self.trainingOutputVolume
        for x in range(borderWidthForFeatures[0], v.shape[0] - borderWidthForFeatures[0]):
            #print(x, "out of", v.shape[0] - borderWidthForFeatures[0] - 1)
            for y in range(borderWidthForFeatures[1], v.shape[1] - borderWidthForFeatures[1]):
                for z in range(borderWidthForFeatures[2], v.shape[2] - borderWidthForFeatures[2]):
                    total += abs(self.network.outputVolume[x, y, z] - self.trainingOutputVolume[x, y, z])
        return total

    def update(self):
        """Update weights by greedy coordinate descent: nudge each weight up or down
        by `step` and keep the change only if it lowers the error."""
        #step = 0.5
        step = 5.0  # * pow(0.9, self.updateCount)
        error = self.error()
        print("NeuralNetwork error: %f" % error)
        weights = self.network.weights
        sh = weights.shape
        for x in range(sh[0]):
            for y in range(sh[1]):
                for z in range(sh[2]):
                    weight = weights[x, y, z]
                    weights[x, y, z] = weight + step
                    errorForIncrease = self.error()
                    #print("errorForIncrease", errorForIncrease)
                    if errorForIncrease < error:
                        # keep the increased weight
                        pass
                    else:
                        weights[x, y, z] = weight - step
                        errorForDecrease = self.error()
                        #print("errorForDecrease", errorForDecrease)
                        if errorForDecrease < error:
                            # keep the decreased weight
                            pass
                        else:
                            weights[x, y, z] = weight
        self.updateCount += 1
{"hexsha": "fa30778099bed618a7f24238a61ca0040fd414d4", "size": 4754, "ext": "py", "lang": "Python", "max_stars_repo_path": "cytoseg/neural_network.py", "max_stars_repo_name": "slash-segmentation/DP2", "max_stars_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_stars_repo_licenses": ["Unlicense"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cytoseg/neural_network.py", "max_issues_repo_name": "slash-segmentation/DP2", "max_issues_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_issues_repo_licenses": ["Unlicense"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cytoseg/neural_network.py", "max_forks_repo_name": "slash-segmentation/DP2", "max_forks_repo_head_hexsha": "6f768e4b8a75a3ab2bf1359ae94704332426a4d6", "max_forks_repo_licenses": ["Unlicense"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.7301587302, "max_line_length": 465, "alphanum_fraction": 0.6396718553, "include": true, "reason": "from numpy,from scipy", "num_tokens": 1025}
# SPDX-License-Identifier: MIT from enum import IntEnum from math import log2 from collections import Counter from sys import stderr try: from scipy.stats import chi2, kstest, uniform except ImportError: print('the scipy library is not installed. \n' 'Use `pip install scipy` to install it', file=stderr) exit(1) class ResultFlag(IntEnum): NONE = 0 SINGLE_BYTE_PATTERN = 1 NOT_RANDOM = 2 RANDOM = 3 RANDOMNESS_SUSPICIOUSLY_HIGH = 4 class ShannonsEntropy: def __init__(self, sector_size, rand_lim=None, sus_rand_lim=None): self.sector_size = sector_size def calc(self, buf): """ Calculates and returns sample entropy on byte level for the argument and single byte pattern if present or None. This code has been adapted from https://gitlab.com/cryptsetup/cryptsetup/-/blob/master/misc/keyslot_checker/chk_luks_keyslots.c#L81 """ freq = Counter(buf) if len(freq) == 1: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, freq.popitem()[0] entropy = sum(f * log2(f) for f in freq.values()) / self.sector_size - log2(self.sector_size) normalized_entropy = abs(entropy) / 8 return normalized_entropy, ResultFlag.NONE, None class ChiSquare8: def __init__(self, sector_size, rand_lim=0.9999, sus_rand_lim=0.0001): self.expected = sector_size / 256 if self.expected < 5: print('warn: the sector size seems to be too small to use with this calculation method.', file=stderr) self.random_limit = chi2.ppf(rand_lim, 255) * self.expected self.sus_random_limit = chi2.ppf(sus_rand_lim, 255) * self.expected self.max_chis = (sector_size - self.expected) ** 2 + 255 * self.expected ** 2 def calc(self, buf): counts = Counter(buf) if len(counts) == 1: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, counts.popitem()[0] chis = sum((counts[i] - self.expected) ** 2 for i in range(256)) randomness = 1 - chis / self.max_chis if chis < self.sus_random_limit: return randomness, ResultFlag.RANDOMNESS_SUSPICIOUSLY_HIGH, None if chis <= self.random_limit: return randomness, ResultFlag.RANDOM, None return randomness, ResultFlag.NOT_RANDOM, None class ChiSquare4: def __init__(self, sector_size, rand_lim=0.9999, sus_rand_lim=0.0001): self.expected = sector_size / 8 if self.expected < 5: print('warn: the sector size seems to be too small to use with this calculation method.', file=stderr) self.random_limit = chi2.ppf(rand_lim, 15) * self.expected self.sus_random_limit = chi2.ppf(sus_rand_lim, 15) * self.expected self.max_chis = (sector_size - self.expected) ** 2 + 255 * self.expected ** 2 def calc(self, buf): counts = Counter(buf) if len(counts) == 1: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, counts.popitem()[0] vals = [0] * 16 for byte, count in counts.items(): vals[byte & 15] += count vals[byte >> 4] += count chis = sum((i - self.expected) ** 2 for i in vals) randomness = 1 - chis / self.max_chis if chis < self.sus_random_limit: return randomness, ResultFlag.RANDOMNESS_SUSPICIOUSLY_HIGH, None if chis <= self.random_limit: return randomness, ResultFlag.RANDOM, None return randomness, ResultFlag.NOT_RANDOM, None class ChiSquare3: N = 3 def __init__(self, sector_size, rand_lim=0.9999, sus_rand_lim=0.0001): self.single_byte_pattern_count = (sector_size * 8) // self.N self.expected = ((sector_size * 8) // self.N) / (2 ** self.N) if self.expected < 5: print('warn: the sector size seems to be too small to use with this calculation method.', file=stderr) self.random_limit = chi2.ppf(rand_lim, 2 ** self.N - 1) * self.expected self.sus_random_limit = chi2.ppf(sus_rand_lim, 2 ** self.N - 1) * self.expected self.max_chis = (sector_size - self.expected) ** 2 + 255 * 
self.expected ** 2 def calc(self, buf): vals = [0] * (2 ** self.N) rem = 0 bits = 0 for byte in buf: for offset in range(8): rem = (rem << 1) | ((byte >> (7 - offset)) & 1) bits += 1 if bits == self.N: vals[rem] += 1 bits = 0 rem = 0 if vals[0] == self.single_byte_pattern_count: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, 0 if vals[-1] == self.single_byte_pattern_count: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, 255 chis = sum((i - self.expected) ** 2 for i in vals) randomness = 1 - chis / self.max_chis if chis < self.sus_random_limit: return randomness, ResultFlag.RANDOMNESS_SUSPICIOUSLY_HIGH, None if chis <= self.random_limit: return randomness, ResultFlag.RANDOM, None return randomness, ResultFlag.NOT_RANDOM, None class ChiSquare1: def __init__(self, sector_size, rand_lim=0.9999, sus_rand_lim=0.0001): self.single_byte_pattern_count = sector_size * 8 self.expected = sector_size * 4 if self.expected < 5: print('warn: the sector size seems to be too small to use with this calculation method.', file=stderr) self.random_limit = chi2.ppf(rand_lim, 1) * self.expected self.sus_random_limit = chi2.ppf(sus_rand_lim, 1) * self.expected self.max_chis = (sector_size - self.expected) ** 2 + 255 * self.expected ** 2 def calc(self, buf): set_bits = sum(map(int.bit_count, buf)) if set_bits == 0: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, 0 if set_bits == self.single_byte_pattern_count: return 0.0, ResultFlag.SINGLE_BYTE_PATTERN, 255 chis = 2 * (set_bits - self.expected) ** 2 randomness = 1 - chis / self.max_chis if chis < self.sus_random_limit: return randomness, ResultFlag.RANDOMNESS_SUSPICIOUSLY_HIGH, None if chis <= self.random_limit: return randomness, ResultFlag.RANDOM, None return randomness, ResultFlag.NOT_RANDOM, None class KSTest: def __init__(self, sector_size, rand_lim=0.9999, sus_rand_lim=0.0001): self.p_rand_lim = 1 - rand_lim self.p_sus_rand_lim = 1 - sus_rand_lim self.dist = uniform(0, 255).cdf def calc(self, buf): _, p = kstest(bytearray(buf), self.dist, mode='asymp') if p > self.p_sus_rand_lim: return 0.0, ResultFlag.RANDOMNESS_SUSPICIOUSLY_HIGH, None if p < self.p_rand_lim: return 0.5, ResultFlag.NOT_RANDOM, None return 1.0, ResultFlag.RANDOM, None analysis_methods = { 'shannon': ShannonsEntropy, 'chi2-8': ChiSquare8, 'chi2-4': ChiSquare4, 'chi2-3': ChiSquare3, 'chi2-1': ChiSquare1, 'kstest': KSTest }
{"hexsha": "f67df7f5f5999535c02ad069cd1b70a7f925bc89", "size": 7046, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/analysis.py", "max_stars_repo_name": "malon43/entropy-visualization", "max_stars_repo_head_hexsha": "f5a2da84b105ac00fc39b94eff32394c7a43819b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/analysis.py", "max_issues_repo_name": "malon43/entropy-visualization", "max_issues_repo_head_hexsha": "f5a2da84b105ac00fc39b94eff32394c7a43819b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/analysis.py", "max_forks_repo_name": "malon43/entropy-visualization", "max_forks_repo_head_hexsha": "f5a2da84b105ac00fc39b94eff32394c7a43819b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-12-03T10:12:58.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-03T10:12:58.000Z", "avg_line_length": 38.9281767956, "max_line_length": 114, "alphanum_fraction": 0.6294351405, "include": true, "reason": "from scipy", "num_tokens": 1917}
using BufferedStreams using Test struct InfiniteStream <: IO byte::UInt8 end function Base.eof(stream::InfiniteStream) return false end function Base.read(stream::InfiniteStream, ::Type{UInt8}) return stream.byte end if isdefined(Base, :unsafe_read) function Base.unsafe_read(stream::InfiniteStream, pointer::Ptr{UInt8}, n::UInt) ccall(:memset, Cvoid, (Ptr{Cvoid}, Cint, Csize_t), pointer, stream.byte, n) return nothing end end # A few things that might not be obvious: # * In a few places we wrap an array in an IOBuffer before wrapping it in a # BufferedInputStream. This is to force the BufferedInputStream to read # incrementally to expose possible bugs in buffer refilling logic. # * Similar, we manually set the buffer size to be smaller than the default to # force more buffer refills. @testset "BufferedInputStream" begin @testset "read" begin data = rand(UInt8, 1000000) stream = BufferedInputStream(IOBuffer(data), 1024) read_data = UInt8[] while !eof(stream) push!(read_data, read(stream, UInt8)) end @test data == read_data @test data == read(BufferedInputStream(IOBuffer(data), 1024)) halfn = div(length(data), 2) @test data[1:halfn] == read(BufferedInputStream(IOBuffer(data), 1024), halfn) # read multi-byte data buffer = IOBuffer() write(buffer, UInt8(1)) write(buffer, UInt16(2)) write(buffer, UInt32(3)) write(buffer, UInt64(4)) write(buffer, UInt128(5)) write(buffer, Float16(6)) write(buffer, Float32(7)) write(buffer, Float64(8)) seekstart(buffer) stream = BufferedInputStream(buffer) @test read(stream, UInt8) === UInt8(1) @test read(stream, UInt16) === UInt16(2) @test read(stream, UInt32) === UInt32(3) @test read(stream, UInt64) === UInt64(4) @test read(stream, UInt128) === UInt128(5) @test read(stream, Float16) === Float16(6) @test read(stream, Float32) === Float32(7) @test read(stream, Float64) === Float64(8) # EOFError stream = BufferedInputStream(IOBuffer()) @test_throws EOFError read(stream, UInt8) @test_throws EOFError read(stream, UInt16) @test_throws EOFError read(stream, UInt32) @test_throws EOFError read(stream, UInt64) @test_throws EOFError read(stream, UInt128) @test_throws EOFError read(stream, Float16) @test_throws EOFError read(stream, Float32) @test_throws EOFError read(stream, Float64) end if isdefined(Base, :unsafe_read) @testset "unsafe_read" begin stream = BufferedInputStream(IOBuffer("abcdefg"), 3) data = Vector{UInt8}(undef, 7) unsafe_read(stream, pointer(data, 1), 1) @test data[1] == UInt8('a') unsafe_read(stream, pointer(data, 2), 2) unsafe_read(stream, pointer(data, 4), 4) @test data == b"abcdefg" @test_throws EOFError unsafe_read(stream, pointer(data), 1) end end @testset "peek" begin stream = BufferedInputStream(IOBuffer([0x01, 0x02])) @test peek(stream) === 0x01 @test peek(stream) === 0x01 read(stream, UInt8) @test peek(stream) === 0x02 @test peek(stream) === 0x02 end @testset "bytesavailable" begin stream = BufferedInputStream(IOBuffer([0x01, 0x02])) @test bytesavailable(stream) == 2 read(stream, 1) @test bytesavailable(stream) == 1 read(stream, 1) @test bytesavailable(stream) == 0 end @testset "peekbytes!" begin data = rand(UInt8, 1000000) stream = BufferedInputStream(IOBuffer(data), 1024) read_data = Array{UInt8}(undef, 1000) @test peekbytes!(stream, read_data, 1000) == 1000 @test data[1:1000] == read_data # Check that we read the bytes we just peeked, i.e. 
that the position # wasn't advanced on peekbytes!() readbytes!(stream, read_data, 1000) @test data[1:1000] == read_data # Check that we now peek the next 24 bytes, as we don't go past the end # of the buffer @test peekbytes!(stream, read_data, 1000) == 24 @test data[1001:1024] == read_data[1:24] # Reset the data data = rand(UInt8, 1000000) stream = BufferedInputStream(IOBuffer(data), 1024) read_data = Array{UInt8}(undef, 2000) @test peekbytes!(stream, read_data, 2000) == 1024 # Note that we truncate at buffer size, as @test data[1:1024] == read_data[1:1024] # Check that we only read up to the buffer size read_data = Array{UInt8}(undef, 5) @test peekbytes!(stream, read_data) == 5 @test data[1:5] == read_data close(stream) @test_throws Exception peekbytes!(stream, read_data) end @testset "readbytes!" begin stream = BufferedInputStream(IOBuffer([0x01:0xff;]), 4) @test !eof(stream) out = zeros(UInt8, 2) @test BufferedStreams.readbytes!(stream, out) === 2 @test out == [0x01, 0x02] out = zeros(UInt8, 3) @test BufferedStreams.readbytes!(stream, out) === 3 @test out == [0x03, 0x04, 0x05] out = zeros(UInt8, 5) @test BufferedStreams.readbytes!(stream, out) === 5 @test out == [0x06, 0x07, 0x08, 0x09, 0x0a] @test !eof(stream) @test read(stream) == [0x0b:0xff;] @test eof(stream) end @testset "readuntil" begin data = rand(UInt8, 1000000) stream = BufferedInputStream(IOBuffer(data), 1024) true_num_zeros = 0 zero_positions = Int[] for (i, b) in enumerate(data) if b == 0x00 push!(zero_positions, i) true_num_zeros += 1 end end num_zeros = 0 chunk_results = Bool[] while true # are we extracting the right chunk? chunk = readuntil(stream, 0x00) first = num_zeros == 0 ? 1 : zero_positions[num_zeros]+1 last = num_zeros < length(zero_positions) ? zero_positions[num_zeros+1] : length(data) true_chunk = data[first:last] push!(chunk_results, true_chunk == chunk) if !eof(stream) num_zeros += 1 else break end end @test all(chunk_results) @test num_zeros == true_num_zeros end @testset "arrays" begin data = rand(UInt8, 1000000) stream = BufferedInputStream(data) read_data = UInt8[] while !eof(stream) push!(read_data, read(stream, UInt8)) end @test data == read_data @test data == read(BufferedInputStream(data)) end @testset "marks" begin # very small buffer stream = BufferedInputStream(IOBuffer([0x01:0xff;]), 2) @test !ismarked(stream) mark(stream) @test ismarked(stream) a = read(stream, UInt8) b = read(stream, UInt8) c = read(stream, UInt8) reset(stream) @test !ismarked(stream) a′ = read(stream, UInt8) b′ = read(stream, UInt8) c′ = read(stream, UInt8) @test (a, b, c) == (a′, b′, c′) == (0x01, 0x02, 0x03) mark(stream) @test unmark(stream) @test !unmark(stream) @test_throws ErrorException reset(stream) end @testset "anchors" begin data = rand(UInt8, 100000) function random_range(n) a = rand(1:n) b = rand(a:n) return a:b end # test that anchors work correctly by extracting random intervals from a # buffered stream. 
function test_anchor() r = random_range(length(data)) i = 1 stream = BufferedInputStream(IOBuffer(data), 1024) while !eof(stream) if i == r.start anchor!(stream) end if i == r.stop + 1 return takeanchored!(stream) == data[r] end read(stream, UInt8) i += 1 end if i == r.stop + 1 return takeanchored!(stream) == data[r] end error("nothing extracted (range: $r)") end @test all(Bool[test_anchor() for _ in 1:100]) end @testset "seek" begin n = 100000 data = rand(UInt8, n) positions = rand(0:n-1, 1000) function test_seek(stream, p) seek(stream, p) return position(stream) == p && read(stream, UInt8) == data[p + 1] end stream = BufferedInputStream(data) @test all(Bool[test_seek(stream, p) for p in positions]) stream = BufferedInputStream(IOBuffer(data), 1024) @test all(Bool[test_seek(stream, p) for p in positions]) end @testset "skip" begin n = 100000 data = rand(UInt8, n) positions = rand(0:n-1, 1000) sort!(positions) last = 1 offsets = Int[] for p in positions push!(offsets, p - last) last = p end function test_skip(stream, p, offset) skip(stream, offset) peek(stream) == data[p] end stream = BufferedInputStream(IOBuffer(data), 1024) @test_throws Exception skip(stream, n + 1) @test_throws Exception skip(stream, -1) stream = BufferedInputStream(IOBuffer(data), 1024) @test all(Bool[test_skip(stream, p, offset) for (p, offset) in zip(positions, offsets)]) end @testset "close" begin iobuffer = IOBuffer([0x00, 0x01]) stream = BufferedInputStream(iobuffer) @test isopen(stream) @test isopen(iobuffer) read(stream, UInt8) @test close(stream) === nothing @test !isopen(stream) @test !isopen(iobuffer) @test_throws Exception read(stream, UInt8) @test close(stream) === nothing end @testset "iostream" begin mktemp() do path, input write(input, [0x01, 0x02, 0x03, 0x04, 0x05]) flush(input) seekstart(input) stream = BufferedInputStream(input, 2) @test !eof(stream) @test read(stream, UInt8) === 0x01 @test !eof(stream) @test read(stream, UInt8) === 0x02 @test !eof(stream) @test read(stream) == [0x03, 0x04, 0x05] @test eof(stream) @test isopen(stream) @test isopen(input) close(stream) @test !isopen(stream) @test !isopen(input) end end @testset "immobilized buffer" begin stream = BufferedInputStream(IOBuffer("abcdefg"), 2) stream.immobilized = false @assert read(stream, UInt8) == UInt8('a') mark(stream) @assert read(stream, UInt8) == UInt8('b') BufferedStreams.fillbuffer!(stream) @test stream.buffer[1] == UInt8('b') stream = BufferedInputStream(IOBuffer("abcdefg"), 2) stream.immobilized = true @assert read(stream, UInt8) == UInt8('a') mark(stream) @assert read(stream, UInt8) == UInt8('b') BufferedStreams.fillbuffer!(stream) @test stream.buffer[2] == UInt8('b') stream = BufferedInputStream(IOBuffer("abcdefg"), 6) stream.immobilized = true data = Vector{UInt8}(undef, 7) BufferedStreams.readbytes!(stream, data, 1, 3) @test data[1:3] == b"abc" BufferedStreams.readbytes!(stream, data, 4, 7) @test data[4:7] == b"defg" end @testset "shiftdata!" begin stream = BufferedInputStream(IOBuffer("abcdefg"), 2) read(stream, 1) @test BufferedStreams.shiftdata!(stream) > 0 read(stream, 2) @test BufferedStreams.shiftdata!(stream) > 0 end @testset "misc." 
begin stream = BufferedInputStream(IOBuffer("foobar"), 10) @test !BufferedStreams.ensurebuffered!(stream, 10) repr_regex = r"^BufferedInputStream{.*}\(<.* \d+% filled>\)$" @test occursin(repr_regex, repr(stream)) stream = BufferedInputStream(IOBuffer("foobar"), 4 * 2^10) @test occursin(repr_regex, repr(stream)) @test occursin(r"KiB", repr(stream)) close(stream) @test occursin(r"^BufferedInputStream{.*}\(<closed>\)$", repr(stream)) @test_throws ArgumentError BufferedInputStream(IOBuffer("foo"), 0) end @testset "massive read" begin byte = 0x34 bufsize = 1024 stream = BufferedInputStream(InfiniteStream(byte), bufsize) # read byte by byte ok = true for i in 1:2^30 ok = ok && (read(stream, UInt8) == byte) end @test ok @test length(stream.buffer) == 1024 # read in one shot @test all(read(stream, 5 * bufsize) .== byte) @test length(stream.buffer) == 1024 end @testset "readavailable" begin stream = BufferedInputStream(IOBuffer("some data")) @test readavailable(stream) == b"some data" end end @testset "BufferedOutputStream" begin @testset "write" begin data = rand(UInt8, 1000000) stream1 = BufferedOutputStream() sink = IOBuffer() stream2 = BufferedOutputStream(sink, 1024) for c in data write(stream1, c) write(stream2, c) end flush(stream1) flush(stream2) @test take!(stream1) == data @test take!(sink) == data close(stream1) close(stream2) @test !isopen(sink) end @testset "arrays" begin expected = UInt8[] stream1 = BufferedOutputStream() sink = IOBuffer() stream2 = BufferedOutputStream(sink, 1024) for _ in 1:1000 data = rand(UInt8, rand(1:1000)) append!(expected, data) write(stream1, data) write(stream2, data) end flush(stream1) flush(stream2) @test take!(stream1) == expected @test take!(sink) == expected close(stream1) close(stream2) end @testset "takebuf_string" begin data = rand('A':'z', 1000000) iobuf = IOBuffer() stream = BufferedOutputStream() for c in data write(stream, c) write(iobuf, c) end @test String(take!((stream))) == String(take!((iobuf))) end @testset "write_result" begin sink = IOBuffer() stream = BufferedOutputStream(sink, 16) for len in 0:10:100 result = write(stream, repeat("x", len)) @test result == len end end @testset "close" begin iobuffer = IOBuffer() stream = BufferedOutputStream(iobuffer) @test isopen(stream) @test isopen(iobuffer) write(stream, 0x00) close(stream) @test !isopen(stream) @test !isopen(iobuffer) @test_throws Exception write(stream, 0x00) end @testset "vector sink" begin sink = UInt8[] stream = BufferedOutputStream(sink) write(stream, 0x00) write(stream, 0x01) write(stream, 0x02) flush(stream) @test take!(stream) == [0x00, 0x01, 0x02] @test isopen(stream) close(stream) @test !isopen(stream) @test_throws Exception write(stream, 0x00) end @testset "iostream" begin mktemp() do path, out stream = BufferedOutputStream(out, 10) write(stream, "hello") @test stat(path).size == 0 write(stream, "world") @test stat(path).size == 0 write(stream, "!") # BufferedOutputStream buffer has run out of space, # but IOStream buffer has not @test stat(path).size == 0 flush(stream) @test stat(path).size == 11 write(stream, "...") @test stat(path).size == 11 close(stream) @test !isopen(out) @test stat(path).size == 14 end end @testset "misc." 
begin stream = BufferedOutputStream(IOBuffer(), 5) @test eof(stream) @test pointer(stream) == pointer(stream.buffer) @test occursin(r"^BufferedOutputStream{.*}\(<.* \d+% filled>\)$", string(stream)) close(stream) @test occursin(r"^BufferedOutputStream{.*}\(<closed>\)$", string(stream)) @test_throws ArgumentError BufferedOutputStream(IOBuffer(), 0) end end
{"hexsha": "efc6182986da19e8cef5ad91208a5403db2a8132", "size": 16859, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/runtests.jl", "max_stars_repo_name": "UnofficialJuliaMirror/BufferedStreams.jl-e1450e63-4bb3-523b-b2a4-4ffa8c0fd77d", "max_stars_repo_head_hexsha": "b85ff82efcd95d3a4d72a4021f0833b579554bec", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 26, "max_stars_repo_stars_event_min_datetime": "2015-09-13T01:44:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-02-04T03:02:41.000Z", "max_issues_repo_path": "test/runtests.jl", "max_issues_repo_name": "UnofficialJuliaMirror/BufferedStreams.jl-e1450e63-4bb3-523b-b2a4-4ffa8c0fd77d", "max_issues_repo_head_hexsha": "b85ff82efcd95d3a4d72a4021f0833b579554bec", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 57, "max_issues_repo_issues_event_min_datetime": "2015-09-14T02:38:05.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-16T20:32:52.000Z", "max_forks_repo_path": "test/runtests.jl", "max_forks_repo_name": "UnofficialJuliaMirror/BufferedStreams.jl-e1450e63-4bb3-523b-b2a4-4ffa8c0fd77d", "max_forks_repo_head_hexsha": "b85ff82efcd95d3a4d72a4021f0833b579554bec", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 25, "max_forks_repo_forks_event_min_datetime": "2015-12-24T17:46:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-10-01T19:28:51.000Z", "avg_line_length": 31.8695652174, "max_line_length": 99, "alphanum_fraction": 0.5653953378, "num_tokens": 4469}
import random
from typing import Union, Tuple, Any, Mapping

import numpy as np

from albumentations.core.transforms_interface import (
    ImageOnlyTransform,
)

from .functional import channel_dropout

__all__ = ["ChannelDropout"]


class ChannelDropout(ImageOnlyTransform):
    """Randomly Drop Channels in the input Image.

    Args:
        channel_drop_range (int, int): range from which we choose the number of channels to drop.
        fill_value (int, float): pixel value for the dropped channel.
        p (float): probability of applying the transform. Default: 0.5.

    Targets:
        image

    Image types:
        uint8, uint16, uint32, float32
    """

    def __init__(
        self,
        channel_drop_range: Tuple[int, int] = (1, 1),
        fill_value: Union[int, float] = 0,
        always_apply: bool = False,
        p: float = 0.5,
    ):
        super(ChannelDropout, self).__init__(always_apply, p)

        self.channel_drop_range = channel_drop_range

        self.min_channels = channel_drop_range[0]
        self.max_channels = channel_drop_range[1]

        if not 1 <= self.min_channels <= self.max_channels:
            raise ValueError("Invalid channel_drop_range. Got: {}".format(channel_drop_range))

        self.fill_value = fill_value

    def apply(self, img: np.ndarray, channels_to_drop: Tuple[int, ...] = (0,), **params) -> np.ndarray:
        return channel_dropout(img, channels_to_drop, self.fill_value)

    def get_params_dependent_on_targets(self, params: Mapping[str, Any]):
        img = params["image"]

        num_channels = img.shape[-1]

        if len(img.shape) == 2 or num_channels == 1:
            raise NotImplementedError("Image has only one channel. ChannelDropout is not defined.")

        if self.max_channels >= num_channels:
            raise ValueError("Can not drop all channels in ChannelDropout.")

        num_drop_channels = random.randint(self.min_channels, self.max_channels)

        channels_to_drop = random.sample(range(num_channels), k=num_drop_channels)

        return {"channels_to_drop": channels_to_drop}

    def get_transform_init_args_names(self) -> Tuple[str, ...]:
        return "channel_drop_range", "fill_value"

    @property
    def targets_as_params(self):
        return ["image"]
{"hexsha": "45d42c76d0a70c4b6dee8817c3955d4b0b1b141e", "size": 2280, "ext": "py", "lang": "Python", "max_stars_repo_path": "albumentations/augmentations/dropout/channel_dropout.py", "max_stars_repo_name": "Multihuntr/albumentations", "max_stars_repo_head_hexsha": "d2db7d7d8859f7090f6fc39a37d6c8858db218d6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 3893, "max_stars_repo_stars_event_min_datetime": "2018-06-06T12:38:13.000Z", "max_stars_repo_stars_event_max_datetime": "2019-11-18T06:21:47.000Z", "max_issues_repo_path": "albumentations/augmentations/dropout/channel_dropout.py", "max_issues_repo_name": "ballade8/albumentations", "max_issues_repo_head_hexsha": "880c1aaed10ab74cfe851496a476c1c34cadcd0f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 270, "max_issues_repo_issues_event_min_datetime": "2018-06-22T18:53:41.000Z", "max_issues_repo_issues_event_max_datetime": "2019-11-16T17:49:43.000Z", "max_forks_repo_path": "albumentations/augmentations/dropout/channel_dropout.py", "max_forks_repo_name": "ballade8/albumentations", "max_forks_repo_head_hexsha": "880c1aaed10ab74cfe851496a476c1c34cadcd0f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 545, "max_forks_repo_forks_event_min_datetime": "2018-06-21T14:33:44.000Z", "max_forks_repo_forks_event_max_datetime": "2019-11-18T07:27:17.000Z", "avg_line_length": 30.8108108108, "max_line_length": 103, "alphanum_fraction": 0.6710526316, "include": true, "reason": "import numpy", "num_tokens": 512}
[STATEMENT] lemma p1_has_only_Says' [rule_format]: "evs \<in> p1 \<Longrightarrow> ev \<in> set evs \<longrightarrow> (\<exists>A B X. ev=Says A B X)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. evs \<in> p1 \<Longrightarrow> ev \<in> set evs \<longrightarrow> (\<exists>A B X. ev = Says A B X) [PROOF STEP] by (erule p1.induct, auto simp: req_def pro_def)
{"llama_tokens": 146, "file": null, "length": 1}
// Copyright (c) 2013 Damond Howard // // Distributed under the Boost Software License, Version 1.0. (See accompanying // file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt #include <hpx/hpx.hpp> #include <hpx/runtime/components/component_factory.hpp> #include <hpx/util/portable_binary_iarchive.hpp> #include <hpx/util/portable_binary_oarchive.hpp> #include <boost/serialization/version.hpp> #include <boost/serialization/export.hpp> #include "server/kernel.hpp" HPX_REGISTER_COMPONENT_MODULE(); typedef hpx::components::managed_component< hpx::cuda::server::kernel> cuda_kernel_type; HPX_REGISTER_MINIMAL_COMPONENT_FACTORY(cuda_kernel_type,kernel); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::set_stream_action, cuda_kernel_set_stream_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::load_module_action, cuda_kernel_load_module_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::load_kernel_action, cuda_kernel_load_kernel_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::set_grid_dim_action, cuda_kernel_set_diminsions_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::set_block_dim_action, cuda_kernel_set_block_dim_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::get_function_action, cuda_kernel_get_function_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::get_module_action, cuda_kernel_get_module_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::get_grid_action, cuda_kernel_get_grid_action); HPX_REGISTER_ACTION(cuda_kernel_type::wrapped_type::get_block_action, cuda_kernel_get_block_action);
{"hexsha": "d7af4f6da983f416106c4f940ca9a4c0020bb489", "size": 1635, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "cuda/kernel.cpp", "max_stars_repo_name": "josephwinston/hpxcl", "max_stars_repo_head_hexsha": "ebd18f245beea46be943ffa22e2fb8145b0b2836", "max_stars_repo_licenses": ["BSL-1.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "cuda/kernel.cpp", "max_issues_repo_name": "josephwinston/hpxcl", "max_issues_repo_head_hexsha": "ebd18f245beea46be943ffa22e2fb8145b0b2836", "max_issues_repo_licenses": ["BSL-1.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "cuda/kernel.cpp", "max_forks_repo_name": "josephwinston/hpxcl", "max_forks_repo_head_hexsha": "ebd18f245beea46be943ffa22e2fb8145b0b2836", "max_forks_repo_licenses": ["BSL-1.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.8780487805, "max_line_length": 79, "alphanum_fraction": 0.8513761468, "num_tokens": 357}
from manimlib.animation.animation import Animation
from manimlib.utils.rate_functions import linear

import numpy as np


class Simulate(Animation):
    CONFIG = {
        "rate_func": linear
    }

    def __init__(self, scene, mobject, **kwargs):
        self.physic_object = mobject
        self.scene = scene
        super().__init__(mobject, **kwargs)

    def interpolate_mobject(self, alpha):
        obj = self.physic_object
        if alpha < obj.show_alpha:
            return
        if not obj.is_added and alpha >= obj.show_alpha:
            obj.is_added = True
            self.scene.add(obj.mobject)
        # look up the precomputed pose closest to this point of the animation
        n = len(obj.pos_and_rot) - 1
        pos, rot = obj.pos_and_rot[int(n * alpha)]
        x, y = pos
        # scale simulation coordinates down to scene coordinates and apply the rotation delta
        obj.mobject.move_to(np.array([x, y, 0]) / 1000)
        obj.mobject.rotate(rot - obj.angle)
        obj.angle = rot
{"hexsha": "3625acc2421e3fcffa0ad71c1180afd43706b1ad", "size": 858, "ext": "py", "lang": "Python", "max_stars_repo_path": "manim_demo/manimlib/animation/physics.py", "max_stars_repo_name": "shujunge/manim_tutorial", "max_stars_repo_head_hexsha": "8e320373f0404dcc0a200ab3750ee70784dc1345", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "manim_demo/manimlib/animation/physics.py", "max_issues_repo_name": "shujunge/manim_tutorial", "max_issues_repo_head_hexsha": "8e320373f0404dcc0a200ab3750ee70784dc1345", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "manim_demo/manimlib/animation/physics.py", "max_forks_repo_name": "shujunge/manim_tutorial", "max_forks_repo_head_hexsha": "8e320373f0404dcc0a200ab3750ee70784dc1345", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.6, "max_line_length": 56, "alphanum_fraction": 0.6037296037, "include": true, "reason": "import numpy", "num_tokens": 216}
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE from __future__ import absolute_import import sys import copy import pytest import numpy import awkward1 def test(): np_array = numpy.array([0.0, 1.1, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9]) one = awkward1.Array(np_array) np_array[1] = 999 assert awkward1.to_list(one) == [0.0, 999, 2.2, 3.3, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9] two = copy.copy(one) np_array[3] = 123 assert awkward1.to_list(two) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9] three = copy.deepcopy(two) four = numpy.copy(two) np_array[5] = 321 assert awkward1.to_list(three) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9] assert awkward1.to_list(four) == [0.0, 999, 2.2, 123, 4.4, 5.5, 6.6, 7.7, 8.8, 9.9] assert awkward1.to_list(copy.deepcopy(awkward1.Array([[1, 2, 3], [], [4, 5]]))) == [[1, 2, 3], [], [4, 5]] assert awkward1.to_list(copy.deepcopy(awkward1.Record({"one": 1, "two": 2.2}))) == awkward1.to_list(copy.deepcopy(awkward1.Record({"one": 1, "two": 2.2})))
{"hexsha": "fae4e37a5ea0145f72dec01a0e09ad2a587ac92f", "size": 1103, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_0511-copy-and-deepcopy.py", "max_stars_repo_name": "uccross/awkward-1.0", "max_stars_repo_head_hexsha": "af774dee5efa3ced4c478e2172f4cb71057ace33", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_0511-copy-and-deepcopy.py", "max_issues_repo_name": "uccross/awkward-1.0", "max_issues_repo_head_hexsha": "af774dee5efa3ced4c478e2172f4cb71057ace33", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_0511-copy-and-deepcopy.py", "max_forks_repo_name": "uccross/awkward-1.0", "max_forks_repo_head_hexsha": "af774dee5efa3ced4c478e2172f4cb71057ace33", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4411764706, "max_line_length": 159, "alphanum_fraction": 0.601087942, "include": true, "reason": "import numpy", "num_tokens": 499}
from __future__ import absolute_import from __future__ import division from __future__ import print_function import argparse from torch.utils.data import DataLoader import torch import torch.nn as nn from torch.autograd import Variable import torchvision import torchvision.transforms as transforms from torch.utils.data import Dataset import torch.nn.functional as F from torch import optim from models.testresults import testResult from models import network import os import numpy as np import matplotlib.pyplot as plt from PIL import Image import warnings warnings.filterwarnings('ignore') #set some random number seed def setup_seed(seed): torch.manual_seed(seed) torch.cuda.manual_seed_all(seed) np.random.seed(seed) torch.backends.cudnn.deterministic = True #adjust the learning rate at some epoch def adjust_learning_rate(optimizer, epoch): update_list = [15, 17, 20] if epoch in update_list: for param_group in optimizer.param_groups: param_group['lr'] = param_group['lr'] * 0.1 return #training process def train(epoch): model.train() for batch_idx, data, in enumerate(trainloader): if not args.cpu: inputs ,labels = data data, target = inputs.cuda(), labels.cuda() data = torch.tensor(data, dtype=torch.float32) optimizer.zero_grad() output = model(data) loss = criterion(output, target.long()) optimizer.zero_grad() loss.backward() optimizer.step() if batch_idx % 1 == 0: print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tLR: {}'.format( epoch, batch_idx * len(data), len(trainloader.dataset), 100. * batch_idx / len(trainloader), loss.data.item(), optimizer.param_groups[0]['lr'])) return #after training, test the model and get the change detection results def test(): global best_kappa global best_FA global best_MA model.eval() Label = [] for i , data in enumerate(testloader,0): if not args.cpu: images = data.cuda() images = torch.tensor(images, dtype=torch.float32) outputs = model(images) _, predicted = torch.max(outputs, 1) predicted.cpu().numpy() predicted.cuda() Label.extend(predicted) outputLabel = np.array(Label) outputImg = outputLabel.reshape((641,613),order = 'F') outputImg = Image.fromarray(np.uint8(outputImg*255.0)) outputImg = outputImg.convert('L') refImg = plt.imread('/home/rf/wangliang/ref_img/Sendai-B-ref.png') testResults = testResult(outputImg,refImg) best_FA = 0 best_MA = 0 FA, MA = testResults.FA_MA() refLabel = refImg.reshape(-1,) acc = testResults.Acc(outputLabel,refLabel) kappa = testResults.KappaCoef(FA, MA) if kappa > best_kappa: outputImg.save('./'+num+'-SB-result_'+num1+'.bmp') best_MA = MA best_FA = FA best_kappa = kappa with open('./'+num+'_result-SB_'+num1+'.txt','a') as file_handle: file_handle.write("epoch = {}\tFA = {}\tMA = {}\tKappa = {}\n".format(epoch,FA,MA,kappa)) print('Best FA: {}\tBest MA: {}\tBest kappa: {:.4f}\n'.format(FA,MA,kappa)) return #load your train data by using this class class trainDatasets(Dataset): def __init__(self,datasets,label,transforms=None): self.datasets = np.load(datasets) self.label = np.load(label) self.transforms = transforms def __len__(self): return len(self.datasets) def __getitem__(self,idx): traindata = self.datasets[idx] traindata = np.transpose(traindata,(2,0,1)) label = self.label[idx] data = (traindata,label) return data #load your test data by using this class class testDatasets(Dataset): def __init__(self,datasets,transforms=None): self.datasets = np.load(datasets) self.transforms = transforms def __len__(self): return len(self.datasets) def __getitem__(self,idx): testdata = self.datasets[idx] testdata = 
np.transpose(testdata,(2,0,1)) return testdata if __name__=='__main__': parser = argparse.ArgumentParser() parser.add_argument('--cpu', action='store_true',help='set if only CPU is available') parser.add_argument('--gpu_id', action='store', default='1',help='gpu_id') parser.add_argument('--lr', action='store', default=0.00001,help='the intial learning rate') parser.add_argument('--wd', action='store', default=1e-5,help='the intial learning rate') parser.add_argument('--train_batch_size', type=int, default=512) parser.add_argument('--test_batch_size', type=int, default=256) parser.add_argument('--num_workers', type=int, default=4) parser.add_argument('--start_epochs', type=int, default=1, metavar='N',help='number of epochs to train_start') parser.add_argument('--end_epochs', type=int, default=100, metavar='N',help='number of epochs to train_end') # W/A — bits parser.add_argument('--Wbits', type=int, default=4) parser.add_argument('--Abits', type=int, default=4) parser.add_argument('--q_type', type=int, default=0,help='quantization type:0-symmetric,1-asymmetric') args = parser.parse_args() print('==> Options:',args) if args.gpu_id: os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu_id setup_seed(1) print('==> Preparing data..') #load your data here train_datasets = trainDatasets(datasets='./'+ num +'YA+YB+SA_train.npy',label='./'+ num +'YA+YB+SA_train_Lab.npy') trainloader = DataLoader(train_datasets,shuffle=True,batch_size=args.train_batch_size,num_workers=args.num_workers) test_datasets = testDatasets(datasets='./'+ num +'SB_test.npy') testloader = DataLoader(test_datasets,shuffle=False,batch_size=args.test_batch_size,num_workers=args.num_workers) print('******Initializing model******') model = network.Net( abits=args.Abits, wbits=args.Wbits) best_kappa = 0 for m in model.modules(): if isinstance(m, nn.Conv2d): nn.init.xavier_uniform_(m.weight.data) if m.bias is not None: m.bias.data.zero_() elif isinstance(m, nn.Linear): m.weight.data.normal_(0, 0.01) m.bias.data.zero_() #model to cuda if not args.cpu: model.cuda() model = torch.nn.DataParallel(model, device_ids=range(torch.cuda.device_count())) print(model) base_lr = float(args.lr) param_dict = dict(model.named_parameters()) params = [] for key, value in param_dict.items(): params += [{'params':[value], 'lr': base_lr, 'weight_decay':args.wd}] criterion = nn.CrossEntropyLoss() optimizer = optim.Adam(params, lr=base_lr, weight_decay=args.wd) for epoch in range(args.start_epochs, args.end_epochs): adjust_learning_rate(optimizer, epoch) train(epoch) test()
{"hexsha": "6a50f5d09b296e0436216ca9e548733dbace8b0d", "size": 7068, "ext": "py", "lang": "Python", "max_stars_repo_path": "main.py", "max_stars_repo_name": "NSLRP/IIQ-CNN", "max_stars_repo_head_hexsha": "09d8d9e5dd704fb317769b7e8c9faa6617b2e23d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "main.py", "max_issues_repo_name": "NSLRP/IIQ-CNN", "max_issues_repo_head_hexsha": "09d8d9e5dd704fb317769b7e8c9faa6617b2e23d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main.py", "max_forks_repo_name": "NSLRP/IIQ-CNN", "max_forks_repo_head_hexsha": "09d8d9e5dd704fb317769b7e8c9faa6617b2e23d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.2666666667, "max_line_length": 119, "alphanum_fraction": 0.6511035654, "include": true, "reason": "import numpy", "num_tokens": 1695}
import json
from bs4 import BeautifulSoup
import urllib3
import numpy as np
import pandas as pd
import re

http = urllib3.PoolManager()
rows = 20


def get_data(start: int, rows: int):
    url = f"http://nemo-solrcloud-cluster1.springer-sbm.com:8983/solr/nemo_smed_live-2019-10-17-10/select?wt=json&q=*:*&fq=-productGroups:[*%20TO%20*]&fq=description:[*%20TO%20*]&fq=disciplinetaxonomyNames:Onkologie&fq=language:en&rows={rows}&start={start}"
    response = http.request('GET', url)
    return json.loads(response.data.decode('utf-8'))


_RE_COMBINE_WHITESPACE = re.compile(r"\s+")


def clean_text_from_html(raw_html: str) -> str:
    clean_text = BeautifulSoup(raw_html, 'html.parser').text
    clean_text = clean_text.replace('\r', ' ').replace('\n', ' ')
    clean_text = _RE_COMBINE_WHITESPACE.sub(" ", clean_text).strip()
    return clean_text


def get_document_data(document):
    doc_id = document['id']
    doc_title = clean_text_from_html(document['title'])
    doc_abstract = clean_text_from_html(document['description'])
    return [doc_id, doc_title, doc_abstract]


export_folder = "../exported_data/"
filename_csv = export_folder + "smed_onko_articles.csv"


def create_csv(data):
    arr = np.asarray(data)
    df = pd.DataFrame(data=arr, columns=['id', 'title', 'abstract'])
    df.to_csv(filename_csv, index=False)
    print(filename_csv, "is created")


data = get_data(0, 0)
numFound = data['response']['numFound']
print(numFound, "documents found.")

articles = []
start = 0
for i in range(0, numFound, rows):
    print(f"Get documents {start + 1} to {start + rows}")
    data = get_data(start, rows)
    for document in data['response']['docs']:
        articles.append(get_document_data(document))
    start = start + rows

create_csv(articles)
print(len(articles), "articles were exported to", filename_csv)
{"hexsha": "7d4c845ff3065061360d6dad81594db694aae668", "size": 1832, "ext": "py", "lang": "Python", "max_stars_repo_path": "export_from_solr/export_smed_articles.py", "max_stars_repo_name": "lukas-abegg/nemo-elasticbert-service", "max_stars_repo_head_hexsha": "3d1d47ce7525b85b2f86f570d550022ac297ca6d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-06T18:57:40.000Z", "max_stars_repo_stars_event_max_datetime": "2020-08-10T09:48:35.000Z", "max_issues_repo_path": "export_from_solr/export_smed_articles.py", "max_issues_repo_name": "lukas-abegg/nemo-elasticbert-service", "max_issues_repo_head_hexsha": "3d1d47ce7525b85b2f86f570d550022ac297ca6d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "export_from_solr/export_smed_articles.py", "max_forks_repo_name": "lukas-abegg/nemo-elasticbert-service", "max_forks_repo_head_hexsha": "3d1d47ce7525b85b2f86f570d550022ac297ca6d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.3432835821, "max_line_length": 257, "alphanum_fraction": 0.7041484716, "include": true, "reason": "import numpy", "num_tokens": 497}
""" Test Logistic Regression Model Since Logistic Regression model is from scikit-learn, we test it by checking if it has the same result as the model in scikit-learn on iris dataset. """ import unittest from numpy.testing import assert_array_equal from sklearn import datasets try: from sklearn.model_selection import train_test_split except ImportError: from sklearn.cross_validation import train_test_split import sklearn.linear_model from libact.base.dataset import Dataset from libact.models import LogisticRegression class LogisticRegressionIrisTestCase(unittest.TestCase): def setUp(self): iris = datasets.load_iris() X = iris.data y = iris.target self.X_train, self.X_test, self.y_train, self.y_test = \ train_test_split(X, y, test_size=0.3, random_state=1126) def test_logistic_regression(self): clf = sklearn.linear_model.LogisticRegression() clf.fit(self.X_train, self.y_train) lr = LogisticRegression() lr.train(Dataset(self.X_train, self.y_train)) assert_array_equal( clf.predict(self.X_train), lr.predict(self.X_train)) assert_array_equal( clf.predict(self.X_test), lr.predict(self.X_test)) self.assertEqual( clf.score(self.X_train, self.y_train), lr.score(Dataset(self.X_train, self.y_train))) self.assertEqual( clf.score(self.X_test, self.y_test), lr.score(Dataset(self.X_test, self.y_test))) if __name__ == '__main__': unittest.main()
{"hexsha": "16c3268161aca46853d12bbf4fa8f66805d94ac8", "size": 1562, "ext": "py", "lang": "Python", "max_stars_repo_path": "libact/models/tests/test_logistic_regression.py", "max_stars_repo_name": "joequant/libact", "max_stars_repo_head_hexsha": "4fbf4d59fd0d4e23858b264de2f35f674c50445b", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-05-09T13:00:45.000Z", "max_stars_repo_stars_event_max_datetime": "2019-05-09T13:00:45.000Z", "max_issues_repo_path": "libact/models/tests/test_logistic_regression.py", "max_issues_repo_name": "DunZhang/libact", "max_issues_repo_head_hexsha": "e37e9ed6c36febe701d84b2d495c958ab02f0bc8", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "libact/models/tests/test_logistic_regression.py", "max_forks_repo_name": "DunZhang/libact", "max_forks_repo_head_hexsha": "e37e9ed6c36febe701d84b2d495c958ab02f0bc8", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-18T20:07:57.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-18T20:07:57.000Z", "avg_line_length": 31.8775510204, "max_line_length": 79, "alphanum_fraction": 0.6984635083, "include": true, "reason": "from numpy", "num_tokens": 329}
# https://towardsdatascience.com/step-by-step-r-cnn-implementation-from-scratch-in-python-e97101ccde55
# https://github.com/1297rohit/RCNN

import os

import cv2
import pandas as pd
import numpy as np


# Intersection over Union https://www.pyimagesearch.com/2016/11/07/intersection-over-union-iou-for-object-detection/
def get_iou(bb1, bb2):
    assert bb1["x1"] < bb1["x2"]
    assert bb1["y1"] < bb1["y2"]
    assert bb2["x1"] < bb2["x2"]
    assert bb2["y1"] < bb2["y2"]

    x_left = max(bb1["x1"], bb2["x1"])
    y_top = max(bb1["y1"], bb2["y1"])
    x_right = min(bb1["x2"], bb2["x2"])
    y_bottom = min(bb1["y2"], bb2["y2"])

    if x_right < x_left or y_bottom < y_top:
        return 0.0

    intersection_area = (x_right - x_left) * (y_bottom - y_top)

    bb1_area = (bb1["x2"] - bb1["x1"]) * (bb1["y2"] - bb1["y1"])
    bb2_area = (bb2["x2"] - bb2["x1"]) * (bb2["y2"] - bb2["y1"])

    iou = intersection_area / float(bb1_area + bb2_area - intersection_area)
    assert iou >= 0.0
    assert iou <= 1.0
    return iou


# Initialize segmentation
cv2.setUseOptimized(True)
ss = cv2.ximgproc.segmentation.createSelectiveSearchSegmentation()

annot = "./Airplanes_Annotations"
path = "./Images"

iou_thresh = 0.99
samples_per_image = 30

out_width = 32
out_height = 32

sam_0_i = 0
sam_1_i = 0

# Extract images
for e1, i in enumerate(os.listdir(annot)):
    try:
        if i.startswith("airplane"):
            # Image
            filename = i.split(".")[0] + ".jpg"
            print(f"Image number: {e1} , name: {filename}")
            image = cv2.imread(os.path.join(path, filename))
            imout = image.copy()
            counter = 0

            # Object ROI
            df = pd.read_csv(os.path.join(annot, i))
            gtvalues = []
            for row in df.iterrows():
                x1 = int(row[1][0].split(" ")[0])
                y1 = int(row[1][0].split(" ")[1])
                x2 = int(row[1][0].split(" ")[2])
                y2 = int(row[1][0].split(" ")[3])
                gtvalues.append({"x1": x1, "x2": x2, "y1": y1, "y2": y2})

                timage = imout[y1:y2, x1:x2]

                # Set interpolation
                interpolation = cv2.INTER_AREA
                if timage.shape[0] > out_height or timage.shape[1] > out_width:
                    interpolation = cv2.INTER_CUBIC

                resized = cv2.resize(
                    timage, (out_height, out_width), interpolation=interpolation
                )

                object_image_path = os.path.join(path, "1", str(sam_1_i) + ".png")
                cv2.imwrite(object_image_path, resized)
                print(f"Object image number: {sam_1_i} , path: {object_image_path}")

                counter += 1
                sam_1_i += 1

            # Segmentation
            ss.setBaseImage(image)
            ss.switchToSelectiveSearchFast()
            ssresults = ss.process()

            falsecounter = 0
            samples_done = False
            positives_done = False
            negatives_done = False

            # Check segments
            for e, result in enumerate(ssresults):
                if e < 2000 and not samples_done:

                    # Check overlap with at least one ground-truth box
                    max_iou = 0
                    for gtval in gtvalues:
                        x, y, w, h = result
                        iou = get_iou(
                            gtval, {"x1": x, "x2": x + w, "y1": y, "y2": y + h}
                        )
                        if iou > max_iou:
                            max_iou = iou

                    if counter < samples_per_image:
                        if max_iou > iou_thresh:
                            timage = imout[y : y + h, x : x + w]

                            # Set interpolation
                            interpolation = cv2.INTER_AREA
                            if (
                                timage.shape[0] > out_height
                                or timage.shape[1] > out_width
                            ):
                                interpolation = cv2.INTER_CUBIC

                            resized = cv2.resize(
                                timage,
                                (out_height, out_width),
                                interpolation=interpolation,
                            )

                            object_overlap_image_path = os.path.join(
                                path, "1", str(sam_1_i) + ".png"
                            )
                            cv2.imwrite(object_overlap_image_path, resized)
                            print(
                                f"Object overlap image number: {sam_1_i} , path: {object_overlap_image_path}"
                            )

                            counter += 1
                            sam_1_i += 1
                    else:
                        positives_done = True

                    if falsecounter < len(gtvalues):
                        if max_iou <= 1 - iou_thresh:
                            timage = imout[y : y + h, x : x + w]

                            # Set interpolation
                            interpolation = cv2.INTER_AREA
                            if (
                                timage.shape[0] > out_height
                                or timage.shape[1] > out_width
                            ):
                                interpolation = cv2.INTER_CUBIC

                            resized = cv2.resize(
                                timage,
                                (out_height, out_width),
                                interpolation=interpolation,
                            )

                            background_image_path = os.path.join(
                                path, "0", str(sam_0_i) + ".png"
                            )
                            cv2.imwrite(background_image_path, resized)
                            print(
                                f"Background number: {sam_0_i} , path: {background_image_path}"
                            )

                            falsecounter += 1
                            sam_0_i += 1
                    else:
                        negatives_done = True

                    if positives_done and negatives_done:
                        print("samples set completed")
                        samples_done = True

    except Exception as e:
        print(e)
        print(f"error processing {filename}")
{"hexsha": "29d950b08c95e3ef41b2dcef2c8ef4b2414178dd", "size": 6489, "ext": "py", "lang": "Python", "max_stars_repo_path": "CategoricalRCNN/categorical_image_generator.py", "max_stars_repo_name": "vladiant/ObjectDetectSamples", "max_stars_repo_head_hexsha": "ac82d0d8284f6fdbcebf1ff429d2aec18cf03fb0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "CategoricalRCNN/categorical_image_generator.py", "max_issues_repo_name": "vladiant/ObjectDetectSamples", "max_issues_repo_head_hexsha": "ac82d0d8284f6fdbcebf1ff429d2aec18cf03fb0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "CategoricalRCNN/categorical_image_generator.py", "max_forks_repo_name": "vladiant/ObjectDetectSamples", "max_forks_repo_head_hexsha": "ac82d0d8284f6fdbcebf1ff429d2aec18cf03fb0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.4590163934, "max_line_length": 116, "alphanum_fraction": 0.4413623054, "include": true, "reason": "import numpy", "num_tokens": 1470}
"""Tests for the TimeChangedBrownianMotion class.""" import numpy as np import pytest from processes.processes import TimeChangedBrownianMotion @pytest.fixture(scope="module") def process(): return TimeChangedBrownianMotion(time_change=lambda t: np.tanh(t)) @pytest.mark.parametrize("time_change", [1, "a", lambda x, t: x]) def test_init_errors(time_change): if callable(time_change): with pytest.raises(ValueError): TimeChangedBrownianMotion(time_change=time_change) else: with pytest.raises(TypeError): TimeChangedBrownianMotion(time_change=time_change) def test_post_init_modification(process): with pytest.raises(ValueError): process.time_change = lambda x, t: x def test_sample(process, T, n_time_grid, n_paths): x0 = 1 paths = process.sample(T=T, n_time_grid=n_time_grid, x0=x0, n_paths=n_paths) assert isinstance(paths, np.ndarray) if n_paths == 1: assert paths.shape == (n_time_grid,) assert paths[0] == x0 else: assert paths.shape == (n_paths, n_time_grid) assert np.all(paths[:, 0] == x0) @pytest.mark.parametrize( "time_change", [lambda t: np.exp(-t), lambda t: -np.exp(-t)] ) def test_invalid_time_change(process, time_change, n_time_grid): process.time_change = time_change with pytest.raises(ValueError): process.sample(T=1, n_time_grid=n_time_grid, x0=1, n_paths=1)
{"hexsha": "39bcade892118141133197a749eff0b86e2c9b9d", "size": 1430, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_time_changed_brownian_motion.py", "max_stars_repo_name": "martinstefanik/processes", "max_stars_repo_head_hexsha": "44f95d99995ec0a2dc530f25d3adf69ebfb69841", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_time_changed_brownian_motion.py", "max_issues_repo_name": "martinstefanik/processes", "max_issues_repo_head_hexsha": "44f95d99995ec0a2dc530f25d3adf69ebfb69841", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_time_changed_brownian_motion.py", "max_forks_repo_name": "martinstefanik/processes", "max_forks_repo_head_hexsha": "44f95d99995ec0a2dc530f25d3adf69ebfb69841", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.1836734694, "max_line_length": 80, "alphanum_fraction": 0.7006993007, "include": true, "reason": "import numpy", "num_tokens": 344}
import unittest import six import cupy from cupy import core from cupy import testing @testing.gpu class SimpleReductionFunction(unittest.TestCase): def setUp(self): self.my_int8_sum = core.create_reduction_func( 'my_sum', ('b->b',), ('in0', 'a + b', 'out0 = a', None)) @testing.numpy_cupy_allclose() def check_int8_sum(self, shape, xp, axis=None, keepdims=False): a = testing.shaped_random(shape, xp, 'b') if xp == cupy: return self.my_int8_sum( a, axis=axis, keepdims=keepdims) else: return a.sum(axis=axis, keepdims=keepdims, dtype='b') def test_shape1(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i,)) self.check_int8_sum((2 ** i - 1,)) self.check_int8_sum((2 ** i + 1,)) def test_shape2(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i, 1000), axis=0) self.check_int8_sum((2 ** i - 1, 1000), axis=0) self.check_int8_sum((2 ** i + 1, 1000), axis=0) def test_shape3(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i, 1000), axis=1) self.check_int8_sum((2 ** i - 1, 1000), axis=1) self.check_int8_sum((2 ** i + 1, 1000), axis=1) def test_shape4(self): self.check_int8_sum((512, 256 * 256), axis=0) self.check_int8_sum((512, 256 * 256), axis=1) self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=0) self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=1) def test_shape5(self): size = ((2 << 32) // cupy.core.core.simple_reduction_function._block_size) self.check_int8_sum((size, 1), axis=1) self.check_int8_sum((size, 1), axis=0) @testing.gpu class TestReductionKernel(unittest.TestCase): def setUp(self): self.my_sum = core.ReductionKernel( 'T x', 'T out', 'x', 'a + b', 'out = a', '0', 'my_sum') @testing.numpy_cupy_allclose() def check_int8_sum(self, shape, xp, axis=None, keepdims=False): a = testing.shaped_random(shape, xp, 'b') if xp == cupy: return self.my_sum( a, axis=axis, keepdims=keepdims) else: return a.sum(axis=axis, keepdims=keepdims, dtype='b') def test_shape1(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i,)) self.check_int8_sum((2 ** i - 1,)) self.check_int8_sum((2 ** i + 1,)) def test_shape2(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i, 1000), axis=0) self.check_int8_sum((2 ** i - 1, 1000), axis=0) self.check_int8_sum((2 ** i + 1, 1000), axis=0) def test_shape3(self): for i in six.moves.range(1, 10): self.check_int8_sum((2 ** i, 1000), axis=1) self.check_int8_sum((2 ** i - 1, 1000), axis=1) self.check_int8_sum((2 ** i + 1, 1000), axis=1) def test_shape4(self): self.check_int8_sum((512, 256 * 256), axis=0) self.check_int8_sum((512, 256 * 256), axis=1) self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=0) self.check_int8_sum((512 + 1, 256 * 256 + 1), axis=1) @testing.gpu class TestReductionKernelInvalidArgument(unittest.TestCase): def test_invalid_kernel_name(self): with six.assertRaisesRegex(self, ValueError, 'Invalid kernel name'): core.ReductionKernel( 'T x', 'T y', 'x', 'a + b', 'y = a', '0', name='1')
{"hexsha": "7c9f21ca435400a230b961ecb91e256487878bd5", "size": 3605, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/cupy_tests/core_tests/test_reduction.py", "max_stars_repo_name": "yutiansut/cupy", "max_stars_repo_head_hexsha": "d460ed81d1deb01db7a55c0f57804856299f491d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2018-05-23T19:26:48.000Z", "max_stars_repo_stars_event_max_datetime": "2018-05-23T19:26:48.000Z", "max_issues_repo_path": "tests/cupy_tests/core_tests/test_reduction.py", "max_issues_repo_name": "martindurant/cupy", "max_issues_repo_head_hexsha": "0ce4ba4fe2993547e11c37ec73cb7639c10b3881", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/cupy_tests/core_tests/test_reduction.py", "max_forks_repo_name": "martindurant/cupy", "max_forks_repo_head_hexsha": "0ce4ba4fe2993547e11c37ec73cb7639c10b3881", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.0094339623, "max_line_length": 76, "alphanum_fraction": 0.5669902913, "include": true, "reason": "import cupy,from cupy", "num_tokens": 1089}