Dataset columns (name, type, observed range):
  Unnamed: 0   int64           0 to 2.93k
  code         string lengths  101 to 62.2k
  docs         string lengths  51 to 10.7k
  doc_len      int64           4 to 1.74k
  words        int64           4 to 4.82k
  lang         string classes  1 value
  prompt       string lengths  320 to 71.2k
1,000
def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.")
Reload markets both sync and async if refresh interval has passed
11
56
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def reload_markets(self) -> None: # Check whether markets have to be reloaded if (self._last_markets_refresh > 0) and ( self._last_markets_refresh + self.markets_refresh_interval > arrow.utcnow().int_timestamp): return None logger.debug("Performing scheduled market reload..") try: self._markets = self._api.load_markets(reload=True) # Also reload async markets to avoid issues with newly listed pairs self._load_async_markets(reload=True) self._last_markets_refresh = arrow.utcnow().int_timestamp self.fill_leverage_tiers() except ccxt.BaseError: logger.exception("Could not reload markets.") ``` ###Assistant : Reload markets both sync and async if refresh interval has passed
1,001
def is_strongly_diagonally_dominant(self): r if not self.is_square: return False rows, cols = self.shape
Tests if the matrix is row strongly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row strongly diagonally dominant if .. math:: \left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_strongly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix.
121
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def is_strongly_diagonally_dominant(self): r if not self.is_square: return False rows, cols = self.shape ``` ###Assistant : Tests if the matrix is row strongly diagonally dominant. Explanation =========== A $n, n$ matrix $A$ is row strongly diagonally dominant if .. math:: \left|A_{i, i}\right| > \sum_{j = 0, j \neq i}^{n-1} \left|A_{i, j}\right| \quad {\text{for all }} i \in \{ 0, ..., n-1 \} Examples ======== >>> from sympy import Matrix >>> A = Matrix([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-2, 2, 1], [1, 3, 2], [1, -2, 0]]) >>> A.is_strongly_diagonally_dominant False >>> A = Matrix([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]) >>> A.is_strongly_diagonally_dominant True Notes ===== If you want to test whether a matrix is column diagonally dominant, you can apply the test after transposing the matrix.
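To make the dominance criterion above concrete, here is a minimal standalone NumPy sketch of the same strict row test. It is independent of SymPy and of the (truncated) method body shown in this row; the function name is reused only for illustration, and the example matrices mirror the docstring.

```python
# Standalone sketch of the strict row diagonal dominance test using NumPy
# (independent of SymPy; illustrates the criterion only).
import numpy as np

def is_strongly_diagonally_dominant(a):
    a = np.asarray(a)
    if a.ndim != 2 or a.shape[0] != a.shape[1]:
        return False
    abs_a = np.abs(a)
    diag = np.diag(abs_a)
    off_diag_sums = abs_a.sum(axis=1) - diag
    # |A[i, i]| must strictly exceed the sum of the other entries in row i.
    return bool(np.all(diag > off_diag_sums))

print(is_strongly_diagonally_dominant([[3, -2, 1], [1, -3, 2], [-1, 2, 4]]))  # False
print(is_strongly_diagonally_dominant([[-4, 2, 1], [1, 6, 2], [1, -2, 5]]))  # True
```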
1,002
def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get("calibration"): calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling") return calibration_cls(binary=True) return None
Creates the appropriate calibration module based on the feature config. Today, only one type of calibration ("temperature_scaling") is available, but more options may be supported in the future.
28
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_calibration_module(self, feature) -> torch.nn.Module: if feature.get("calibration"): calibration_cls = calibration.get_calibration_cls(BINARY, "temperature_scaling") return calibration_cls(binary=True) return None ``` ###Assistant : Creates the appropriate calibration module based on the feature config. Today, only one type of calibration ("temperature_scaling") is available, but more options may be supported in the future.
1,003
def _download(url, path, md5sum=None): if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum, url)): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: raise RuntimeError("Download from {} failed. " "Retry limit reached".format(url)) logger.info("Downloading {} from {}".format(fname, url)) # NOTE: windows path join may incur \, which is invalid in url if sys.platform == "win32": url = url.replace('\\', '/') req = requests.get(url, stream=True) if req.status_code != 200: raise RuntimeError("Downloading from {} failed with code " "{}!".format(url, req.status_code)) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname # after download finished tmp_fullname = fullname + "_tmp" total_size = req.headers.get('content-length') with open(tmp_fullname, 'wb') as f: if total_size: for chunk in tqdm.tqdm( req.iter_content(chunk_size=1024), total=(int(total_size) + 1023) // 1024, unit='KB'): f.write(chunk) else: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) shutil.move(tmp_fullname, fullname) return fullname
Download from url, save to path. url (str): download url path (str): download to given path
16
143
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _download(url, path, md5sum=None): if not osp.exists(path): os.makedirs(path) fname = osp.split(url)[-1] fullname = osp.join(path, fname) retry_cnt = 0 while not (osp.exists(fullname) and _check_exist_file_md5(fullname, md5sum, url)): if retry_cnt < DOWNLOAD_RETRY_LIMIT: retry_cnt += 1 else: raise RuntimeError("Download from {} failed. " "Retry limit reached".format(url)) logger.info("Downloading {} from {}".format(fname, url)) # NOTE: windows path join may incur \, which is invalid in url if sys.platform == "win32": url = url.replace('\\', '/') req = requests.get(url, stream=True) if req.status_code != 200: raise RuntimeError("Downloading from {} failed with code " "{}!".format(url, req.status_code)) # For protecting download interupted, download to # tmp_fullname firstly, move tmp_fullname to fullname # after download finished tmp_fullname = fullname + "_tmp" total_size = req.headers.get('content-length') with open(tmp_fullname, 'wb') as f: if total_size: for chunk in tqdm.tqdm( req.iter_content(chunk_size=1024), total=(int(total_size) + 1023) // 1024, unit='KB'): f.write(chunk) else: for chunk in req.iter_content(chunk_size=1024): if chunk: f.write(chunk) shutil.move(tmp_fullname, fullname) return fullname ``` ###Assistant : Download from url, save to path. url (str): download url path (str): download to given path
1,004
def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), )
Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.
49
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def related_objects(self): all_related_fields = self._get_fields( forward=False, reverse=True, include_hidden=True ) return make_immutable_fields_list( "related_objects", ( obj for obj in all_related_fields if not obj.hidden or obj.field.many_to_many ), ) ``` ###Assistant : Return all related objects pointing to the current model. The related objects can come from a one-to-one, one-to-many, or many-to-many field relation type. Private API intended only to be used by Django itself; get_fields() combined with filtering of field properties is the public API for obtaining this field list.
1,005
def test_https_good_referer(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META["HTTP_HOST"] = "www.example.com" req.META["HTTP_REFERER"] = "https://www.example.com/somepage" mw = CsrfViewMiddleware(post_form_view) mw.process_request(req) resp = mw.process_view(req, post_form_view, (), {}) self.assertIsNone(resp)
A POST HTTPS request with a good referer is accepted.
10
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_https_good_referer(self): req = self._get_POST_request_with_token() req._is_secure_override = True req.META["HTTP_HOST"] = "www.example.com" req.META["HTTP_REFERER"] = "https://www.example.com/somepage" mw = CsrfViewMiddleware(post_form_view) mw.process_request(req) resp = mw.process_view(req, post_form_view, (), {}) self.assertIsNone(resp) ``` ###Assistant : A POST HTTPS request with a good referer is accepted.
1,006
def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={"max_lifetime": one_day_ms * 35}, tok=self.token, ) self._test_retention(room_id, expected_code_for_first_event=404)
Tests that an event gets correctly expired if there is no default retention policy but there's a policy specific to the room.
22
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_state_policy(self) -> None: room_id = self.helper.create_room_as(self.user_id, tok=self.token) # Set the maximum lifetime to 35 days so that the first event gets expired but not # the second one. self.helper.send_state( room_id=room_id, event_type=EventTypes.Retention, body={"max_lifetime": one_day_ms * 35}, tok=self.token, ) self._test_retention(room_id, expected_code_for_first_event=404) ``` ###Assistant : Tests that an event gets correctly expired if there is no default retention policy but there's a policy specific to the room.
1,007
def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements with key if they are # greater than key while j >= 0 and list[j] > key: list[j + 1] = list[j] j = j - 1 list[j + 1] = key return list
sort list in ascending order INPUT: list=list of values to be sorted n=size of list that contains values to be sorted OUTPUT: list of sorted values in ascending order
29
53
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def insertion_sort(list, n): for i in range(0, n): key = list[i] j = i - 1 # Swap elements with key if they are # greater than key while j >= 0 and list[j] > key: list[j + 1] = list[j] j = j - 1 list[j + 1] = key return list ``` ###Assistant : sort list in ascending order INPUT: list=list of values to be sorted n=size of list that contains values to be sorted OUTPUT: list of sorted values in ascending order
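A minimal, self-contained usage sketch of the insertion sort above. The parameter is renamed from `list` to `values` here only to avoid shadowing the Python builtin; the algorithm itself is unchanged.

```python
# Minimal usage sketch of the insertion sort shown above.
def insertion_sort(values, n):
    for i in range(0, n):
        key = values[i]
        j = i - 1
        # Shift elements greater than key one position to the right.
        while j >= 0 and values[j] > key:
            values[j + 1] = values[j]
            j = j - 1
        values[j + 1] = key
    return values

data = [9, 4, 7, 1, 3]
print(insertion_sort(data, len(data)))  # [1, 3, 4, 7, 9]
```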
1,008
def get_template_context(self): return {"name": self.__class__.__name__.lower(), "label": self.label}
:return: a dictionary with context variables for the javascript file associated with the context
14
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_template_context(self): return {"name": self.__class__.__name__.lower(), "label": self.label} ``` ###Assistant : :return: a dictionary with context variables for the javascript file associated with the context
1,009
def connect(self): if self.is_connected is True: return self.connection connection = teradatasql.connect( **self.connection_data ) self.is_connected = True self.connection = connection return self.connection
Handles the connection to a Teradata database instance.
8
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def connect(self): if self.is_connected is True: return self.connection connection = teradatasql.connect( **self.connection_data ) self.is_connected = True self.connection = connection return self.connection ``` ###Assistant : Handles the connection to a Teradata database instance.
1,010
def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) manager.stop() manager.window = None
Unregister and stop an event manager previously registered with :meth:`register_event_manager`. .. versionadded:: 2.1.0 .. warning:: This is an experimental method and it remains so until this warning is present as it can be changed or removed in the next versions of Kivy.
42
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def unregister_event_manager(self, manager): self.event_managers.remove(manager) for type_id in manager.type_ids: self.event_managers_dict[type_id].remove(manager) manager.stop() manager.window = None ``` ###Assistant : Unregister and stop an event manager previously registered with :meth:`register_event_manager`. .. versionadded:: 2.1.0 .. warning:: This is an experimental method and it remains so until this warning is present as it can be changed or removed in the next versions of Kivy.
1,011
def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge( [[cls]] + explicit_c3_mros + abstract_c3_mros + other_c3_mros + [explicit_bases] + [abstract_bases] + [other_bases] )
Computes the method resolution order using extended C3 linearization. If no *abcs* are given, the algorithm works exactly like the built-in C3 linearization used for method resolution. If given, *abcs* is a list of abstract base classes that should be inserted into the resulting MRO. Unrelated ABCs are ignored and don't end up in the result. The algorithm inserts ABCs where their functionality is introduced, i.e. issubclass(cls, abc) returns True for the class itself but returns False for all its direct base classes. Implicit ABCs for a given class (either registered or inferred from the presence of a special method like __len__) are inserted directly after the last ABC explicitly listed in the MRO of said class. If two implicit ABCs end up next to each other in the resulting MRO, their ordering depends on the order of types in *abcs*.
141
132
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _c3_mro(cls, abcs=None): for i, base in enumerate(reversed(cls.__bases__)): if hasattr(base, '__abstractmethods__'): boundary = len(cls.__bases__) - i break # Bases up to the last explicit ABC are considered first. else: boundary = 0 abcs = list(abcs) if abcs else [] explicit_bases = list(cls.__bases__[:boundary]) abstract_bases = [] other_bases = list(cls.__bases__[boundary:]) for base in abcs: if issubclass(cls, base) and not any( issubclass(b, base) for b in cls.__bases__ ): # If *cls* is the class that introduces behaviour described by # an ABC *base*, insert said ABC to its MRO. abstract_bases.append(base) for base in abstract_bases: abcs.remove(base) explicit_c3_mros = [_c3_mro(base, abcs=abcs) for base in explicit_bases] abstract_c3_mros = [_c3_mro(base, abcs=abcs) for base in abstract_bases] other_c3_mros = [_c3_mro(base, abcs=abcs) for base in other_bases] return _c3_merge( [[cls]] + explicit_c3_mros + abstract_c3_mros + other_c3_mros + [explicit_bases] + [abstract_bases] + [other_bases] ) ``` ###Assistant : Computes the method resolution order using extended C3 linearization. If no *abcs* are given, the algorithm works exactly like the built-in C3 linearization used for method resolution. If given, *abcs* is a list of abstract base classes that should be inserted into the resulting MRO. Unrelated ABCs are ignored and don't end up in the result. The algorithm inserts ABCs where their functionality is introduced, i.e. issubclass(cls, abc) returns True for the class itself but returns False for all its direct base classes. Implicit ABCs for a given class (either registered or inferred from the presence of a special method like __len__) are inserted directly after the last ABC explicitly listed in the MRO of said class. If two implicit ABCs end up next to each other in the resulting MRO, their ordering depends on the order of types in *abcs*.
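Since `_c3_mro` reduces to the built-in C3 linearization when no ABCs are given, a small hierarchy inspected through Python's own MRO illustrates the baseline ordering the function extends. This shows plain C3 only, not the ABC-insertion behaviour described above.

```python
# Plain C3 linearization as computed by Python itself; _c3_mro reproduces
# this ordering when no abcs are supplied.
class A: pass
class B(A): pass
class C(A): pass
class D(B, C): pass

print([cls.__name__ for cls in D.__mro__])
# ['D', 'B', 'C', 'A', 'object']
```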
1,012
async def async_start_charging(self) -> None: await self.hass.async_add_executor_job(self.leaf.start_charging) self.schedule_update()
Request to start charging the car. Used by the button platform.
11
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_start_charging(self) -> None: await self.hass.async_add_executor_job(self.leaf.start_charging) self.schedule_update() ``` ###Assistant : Request to start charging the car. Used by the button platform.
1,013
def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False)
Test multiple requests being rewritten to different targets.
8
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multiple_server_connections(tctx): server1 = Placeholder(Server) server2 = Placeholder(Server) playbook = Playbook(http.HttpLayer(tctx, HTTPMode.regular), hooks=False) ``` ###Assistant : Test multiple requests being rewritten to different targets.
1,014
def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2'])
Tests error checking when the name of the output overlaps
10
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_overlapping_output_names(self) -> None: self._test_overlapping_names( outputs0=['o0', 'o1'], outputs1=['o1', 'o2']) ``` ###Assistant : Tests error checking when the name of the output overlaps
1,015
def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert "profiles.default" not in temporary_profiles_path.read_text()
Including the default has a tendency to bake in settings the user may not want, and can prevent them from gaining new defaults.
23
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_write_profiles_does_not_include_default(self, temporary_profiles_path): write_profiles({}) assert "profiles.default" not in temporary_profiles_path.read_text() ``` ###Assistant : Including the default has a tendency to bake in settings the user may not want, and can prevent them from gaining new defaults.
1,016
def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2)
A phase retarder Jones matrix with retardance `delta` at angle `theta`. Parameters ========== theta : numeric type or SymPy Symbol The angle of the fast axis relative to the horizontal plane. delta : numeric type or SymPy Symbol The phase difference between the fast and slow axes of the transmitted light. Returns ======= SymPy Matrix : A Jones matrix representing the retarder. Examples ======== A generic retarder. >>> from sympy import pprint, symbols >>> from sympy.physics.optics.polarization import phase_retarder >>> theta, delta = symbols("theta, delta", real=True) >>> R = phase_retarder(theta, delta) >>> pprint(R, use_unicode=True) ⎑ -β…ˆβ‹…Ξ΄ -β…ˆβ‹…Ξ΄ ⎀ ⎒ ───── ───── βŽ₯ βŽ’βŽ› β…ˆβ‹…Ξ΄ 2 2 ⎞ 2 βŽ› β…ˆβ‹…Ξ΄βŽž 2 βŽ₯ βŽ’βŽβ„― β‹…sin (ΞΈ) + cos (ΞΈ)βŽ β‹…β„― ⎝1 - β„― βŽ β‹…β„― β‹…sin(ΞΈ)β‹…cos(ΞΈ)βŽ₯ ⎒ βŽ₯ ⎒ -β…ˆβ‹…Ξ΄ -β…ˆβ‹…Ξ΄ βŽ₯ ⎒ ───── ─────βŽ₯ βŽ’βŽ› β…ˆβ‹…Ξ΄βŽž 2 βŽ› β…ˆβ‹…Ξ΄ 2 2 ⎞ 2 βŽ₯ ⎣⎝1 - β„― βŽ β‹…β„― β‹…sin(ΞΈ)β‹…cos(ΞΈ) βŽβ„― β‹…cos (ΞΈ) + sin (ΞΈ)βŽ β‹…β„― ⎦
153
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def phase_retarder(theta=0, delta=0): R = Matrix([[cos(theta)**2 + exp(I*delta)*sin(theta)**2, (1-exp(I*delta))*cos(theta)*sin(theta)], [(1-exp(I*delta))*cos(theta)*sin(theta), sin(theta)**2 + exp(I*delta)*cos(theta)**2]]) return R*exp(-I*delta/2) ``` ###Assistant : A phase retarder Jones matrix with retardance `delta` at angle `theta`. Parameters ========== theta : numeric type or SymPy Symbol The angle of the fast axis relative to the horizontal plane. delta : numeric type or SymPy Symbol The phase difference between the fast and slow axes of the transmitted light. Returns ======= SymPy Matrix : A Jones matrix representing the retarder. Examples ======== A generic retarder. >>> from sympy import pprint, symbols >>> from sympy.physics.optics.polarization import phase_retarder >>> theta, delta = symbols("theta, delta", real=True) >>> R = phase_retarder(theta, delta) >>> pprint(R, use_unicode=True) ⎑ -β…ˆβ‹…Ξ΄ -β…ˆβ‹…Ξ΄ ⎀ ⎒ ───── ───── βŽ₯ βŽ’βŽ› β…ˆβ‹…Ξ΄ 2 2 ⎞ 2 βŽ› β…ˆβ‹…Ξ΄βŽž 2 βŽ₯ βŽ’βŽβ„― β‹…sin (ΞΈ) + cos (ΞΈ)βŽ β‹…β„― ⎝1 - β„― βŽ β‹…β„― β‹…sin(ΞΈ)β‹…cos(ΞΈ)βŽ₯ ⎒ βŽ₯ ⎒ -β…ˆβ‹…Ξ΄ -β…ˆβ‹…Ξ΄ βŽ₯ ⎒ ───── ─────βŽ₯ βŽ’βŽ› β…ˆβ‹…Ξ΄βŽž 2 βŽ› β…ˆβ‹…Ξ΄ 2 2 ⎞ 2 βŽ₯ ⎣⎝1 - β„― βŽ β‹…β„― β‹…sin(ΞΈ)β‹…cos(ΞΈ) βŽβ„― β‹…cos (ΞΈ) + sin (ΞΈ)βŽ β‹…β„― ⎦
1,017
def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath("gdb") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath("lldb") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath("lldb") if lldb_path is None: general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.") if gdb_path is not None: args = (gdb_path, "gdb", "-ex=run", "-ex=where", "-ex=quit", "--args") + args else: args = (lldb_path, "lldb", "-o", "run", "-o", "bt", "-o", "quit", "--") + args return args
Wrap a command for system debugger to call exec Args: args: (list of str) args for call to be debugged Returns: args tuple with debugger command inserted Notes: Currently only gdb and lldb are supported, but adding more debuggers would be very welcome.
43
90
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def wrapCommandForDebuggerForExec(*args): gdb_path = getExecutablePath("gdb") # Windows extra ball, attempt the downloaded one. if isWin32Windows() and gdb_path is None: from nuitka.Options import assumeYesForDownloads mingw64_gcc_path = getCachedDownloadedMinGW64( target_arch=getArchitecture(), assume_yes_for_downloads=assumeYesForDownloads(), ) with withEnvironmentPathAdded("PATH", os.path.dirname(mingw64_gcc_path)): lldb_path = getExecutablePath("lldb") if gdb_path is None and lldb_path is None: lldb_path = getExecutablePath("lldb") if lldb_path is None: general.sysexit("Error, no 'gdb' or 'lldb' binary found in path.") if gdb_path is not None: args = (gdb_path, "gdb", "-ex=run", "-ex=where", "-ex=quit", "--args") + args else: args = (lldb_path, "lldb", "-o", "run", "-o", "bt", "-o", "quit", "--") + args return args ``` ###Assistant : Wrap a command for system debugger to call exec Args: args: (list of str) args for call to be debugged Returns: args tuple with debugger command inserted Notes: Currently only gdb and lldb are supported, but adding more debuggers would be very welcome.
1,018
def test_commands_with_invalid_settings(self): args = ["startproject"] out, err = self.run_django_admin(args, settings_file="bad_settings") self.assertNoOutput(out) self.assertOutput(err, "You must provide a project name", regex=True)
Commands that don't require settings succeed if the settings file doesn't exist.
12
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_commands_with_invalid_settings(self): args = ["startproject"] out, err = self.run_django_admin(args, settings_file="bad_settings") self.assertNoOutput(out) self.assertOutput(err, "You must provide a project name", regex=True) ``` ###Assistant : Commands that don't require settings succeed if the settings file doesn't exist.
1,019
def read(self, size=-1): if self.closed: raise ValueError("I/O operation on closed file") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data
Read at most size bytes from the chunk. If size is omitted or negative, read until the end of the chunk.
21
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read(self, size=-1): if self.closed: raise ValueError("I/O operation on closed file") if self.size_read >= self.chunksize: return b'' if size < 0: size = self.chunksize - self.size_read if size > self.chunksize - self.size_read: size = self.chunksize - self.size_read data = self.file.read(size) self.size_read = self.size_read + len(data) if self.size_read == self.chunksize and \ self.align and \ (self.chunksize & 1): dummy = self.file.read(1) self.size_read = self.size_read + len(dummy) return data ``` ###Assistant : Read at most size bytes from the chunk. If size is omitted or negative, read until the end of the chunk.
1,020
def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispatches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side="right") # but we need chunk_locations as python ints for getitem calls downstream chunk_locations = chunk_locations.tolist() where = np.where(np.diff(chunk_locations))[0] + 1 extra = asarray_safe([0], like=where) c_loc = asarray_safe([len(chunk_locations)], like=where) where = np.concatenate([extra, where, c_loc]) out = [] for i in range(len(where) - 1): sub_index = index[where[i] : where[i + 1]] chunk = chunk_locations[where[i]] if chunk > 0: sub_index = sub_index - cum_chunks[chunk - 1] out.append((chunk, sub_index)) return out
Construct a plan to slice chunks with the given index Parameters ---------- chunks : Tuple[int] One dimension's worth of chunking information index : np.ndarray[int] The index passed to slice on that dimension Returns ------- out : List[Tuple[int, np.ndarray]] A list of chunk/sub-index pairs corresponding to each output chunk
48
99
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def slicing_plan(chunks, index): from dask.array.utils import asarray_safe if not is_arraylike(index): index = np.asanyarray(index) cum_chunks = cached_cumsum(chunks) cum_chunks = asarray_safe(cum_chunks, like=index) # this dispatches to the array library chunk_locations = np.searchsorted(cum_chunks, index, side="right") # but we need chunk_locations as python ints for getitem calls downstream chunk_locations = chunk_locations.tolist() where = np.where(np.diff(chunk_locations))[0] + 1 extra = asarray_safe([0], like=where) c_loc = asarray_safe([len(chunk_locations)], like=where) where = np.concatenate([extra, where, c_loc]) out = [] for i in range(len(where) - 1): sub_index = index[where[i] : where[i + 1]] chunk = chunk_locations[where[i]] if chunk > 0: sub_index = sub_index - cum_chunks[chunk - 1] out.append((chunk, sub_index)) return out ``` ###Assistant : Construct a plan to slice chunks with the given index Parameters ---------- chunks : Tuple[int] One dimension's worth of chunking information index : np.ndarray[int] The index passed to slice on that dimension Returns ------- out : List[Tuple[int, np.ndarray]] A list of chunk/sub-index pairs corresponding to each output chunk
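A plain-NumPy sketch of the same chunk/sub-index computation, without Dask's cached cumsum or array-library dispatching. It groups by chunk rather than by contiguous runs of `index`, so for a sorted index it yields the same pairs as the function above; the helper name and the example chunking are illustrative.

```python
# Plain-NumPy sketch of the chunk/sub-index computation performed above.
import numpy as np

def slicing_plan_simple(chunks, index):
    index = np.asanyarray(index)
    cum_chunks = np.cumsum(chunks)
    # For each requested element, find which chunk it falls into.
    chunk_locations = np.searchsorted(cum_chunks, index, side="right")
    out = []
    for chunk in range(len(chunks)):
        mask = chunk_locations == chunk
        if not mask.any():
            continue
        sub_index = index[mask]
        if chunk > 0:
            # Convert global positions to positions within the chunk.
            sub_index = sub_index - cum_chunks[chunk - 1]
        out.append((chunk, sub_index))
    return out

# Three chunks of sizes 2, 3, 2 (global indices 0-1, 2-4, 5-6).
print(slicing_plan_simple((2, 3, 2), [0, 3, 4, 6]))
# [(0, array([0])), (1, array([1, 2])), (2, array([1]))]
```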
1,021
def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs
Add the fc branch which consists of a sequential of conv layers.
12
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_conv_branch(self) -> None: branch_convs = ModuleList() for i in range(self.num_convs): branch_convs.append( Bottleneck( inplanes=self.conv_out_channels, planes=self.conv_out_channels // 4, conv_cfg=self.conv_cfg, norm_cfg=self.norm_cfg)) return branch_convs ``` ###Assistant : Add the fc branch which consists of a sequential of conv layers.
1,022
def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files
Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`).
36
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def match_files(patterns, files): all_files = files if isinstance(files, Collection) else list(files) return_files = set() for pattern in patterns: if pattern.include is not None: result_files = pattern.match(all_files) if pattern.include: return_files.update(result_files) else: return_files.difference_update(result_files) return return_files ``` ###Assistant : Matches the files to the patterns. *patterns* (:class:`~collections.abc.Iterable` of :class:`~pathspec.pattern.Pattern`) contains the patterns to use. *files* (:class:`~collections.abc.Iterable` of :class:`str`) contains the normalized file paths to be matched against *patterns*. Returns the matched files (:class:`set` of :class:`str`).
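A simplified, self-contained stand-in for the include/exclude accumulation above, using `fnmatch` globs paired with an explicit include flag instead of pathspec `Pattern` objects. The real library compiles gitwildmatch patterns, so this is illustrative only.

```python
# Simplified stand-in for the include/exclude accumulation shown above,
# using fnmatch globs instead of pathspec Pattern objects.
from fnmatch import fnmatch

def match_files_simple(patterns, files):
    # patterns: list of (glob, include_flag); include_flag False means "exclude".
    files = list(files)
    matched = set()
    for glob, include in patterns:
        hits = {f for f in files if fnmatch(f, glob)}
        if include:
            matched.update(hits)
        else:
            matched.difference_update(hits)
    return matched

files = ["src/a.py", "src/b.txt", "docs/readme.md"]
patterns = [("src/*", True), ("*.txt", False)]
print(sorted(match_files_simple(patterns, files)))  # ['src/a.py']
```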
1,023
def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding="utf-8") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas "buffer" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( "This column has a fixed-length dtype so " "it does not have an offsets buffer" ) return buffer, dtype
Return the buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and the buffer's associated dtype. Raises NoBufferPresent if the data buffer does not have an associated offsets buffer.
32
130
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_offsets_buffer(self) -> Tuple[PandasBuffer, Any]: if self.dtype[0] == DtypeKind.STRING: # For each string, we need to manually determine the next offset values = self._col.to_numpy() ptr = 0 offsets = np.zeros(shape=(len(values) + 1,), dtype=np.int64) for i, v in enumerate(values): # For missing values (in this case, `np.nan` values) # we don't increment the pointer if isinstance(v, str): b = v.encode(encoding="utf-8") ptr += len(b) offsets[i + 1] = ptr # Convert the offsets to a Pandas "buffer" using # the NumPy array as the backing store buffer = PandasBuffer(offsets) # Assemble the buffer dtype info dtype = ( DtypeKind.INT, 64, ArrowCTypes.INT64, Endianness.NATIVE, ) # note: currently only support native endianness else: raise NoBufferPresent( "This column has a fixed-length dtype so " "it does not have an offsets buffer" ) return buffer, dtype ``` ###Assistant : Return the buffer containing the offset values for variable-size binary data (e.g., variable-length strings) and the buffer's associated dtype. Raises NoBufferPresent if the data buffer does not have an associated offsets buffer.
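A standalone NumPy sketch of how the UTF-8 offsets array is assembled for variable-length strings, with the pandas interchange `Buffer`/dtype wrappers omitted; the helper name is made up for illustration.

```python
# Standalone sketch of building the variable-length string offsets array,
# without the pandas interchange Buffer/dtype wrappers.
import numpy as np

def build_string_offsets(values):
    offsets = np.zeros(len(values) + 1, dtype=np.int64)
    ptr = 0
    for i, v in enumerate(values):
        if isinstance(v, str):  # missing values do not advance the pointer
            ptr += len(v.encode("utf-8"))
        offsets[i + 1] = ptr
    return offsets

print(build_string_offsets(["ab", None, "cde"]))  # [0 2 2 5]
```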
1,024
def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None
Unschedule any pending refresh since there are no longer any listeners.
11
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _unschedule_refresh(self) -> None: if self._unsub_refresh: self._unsub_refresh() self._unsub_refresh = None ``` ###Assistant : Unschedule any pending refresh since there are no longer any listeners.
1,025
def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close()
PostgreSQL shouldn't roll back SET TIME ZONE, even if the first transaction is rolled back (#17062).
16
119
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_connect_and_rollback(self): new_connection = connection.copy() try: # Ensure the database default time zone is different than # the time zone in new_connection.settings_dict. We can # get the default time zone by reset & show. with new_connection.cursor() as cursor: cursor.execute("RESET TIMEZONE") cursor.execute("SHOW TIMEZONE") db_default_tz = cursor.fetchone()[0] new_tz = "Europe/Paris" if db_default_tz == "UTC" else "UTC" new_connection.close() # Invalidate timezone name cache, because the setting_changed # handler cannot know about new_connection. del new_connection.timezone_name # Fetch a new connection with the new_tz as default # time zone, run a query and rollback. with self.settings(TIME_ZONE=new_tz): new_connection.set_autocommit(False) new_connection.rollback() # Now let's see if the rollback rolled back the SET TIME ZONE. with new_connection.cursor() as cursor: cursor.execute("SHOW TIMEZONE") tz = cursor.fetchone()[0] self.assertEqual(new_tz, tz) finally: new_connection.close() ``` ###Assistant : PostgreSQL shouldn't roll back SET TIME ZONE, even if the first transaction is rolled back (#17062).
1,026
def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { "int_item": 1, "int_list": [2, 2, 3], "float_item": 1.0, "float_list": [2.0, 3.0, 4.0], "bytes_item": b"abc", "bytes_list": [b"abc", b"1234"], }, # Row two. { "int_item": 2, "int_list": [3, 3, 4], "float_item": 2.0, "float_list": [2.0, 2.0, 3.0], "bytes_item": b"def", "bytes_list": [b"def", b"1234"], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take()
Test reading back TFRecords written using datasets. The dataset we read back should be the same that we wrote.
19
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_readback_tfrecords(ray_start_regular_shared, tmp_path): # The dataset we will write to a .tfrecords file. ds = ray.data.from_items( [ # Row one. { "int_item": 1, "int_list": [2, 2, 3], "float_item": 1.0, "float_list": [2.0, 3.0, 4.0], "bytes_item": b"abc", "bytes_list": [b"abc", b"1234"], }, # Row two. { "int_item": 2, "int_list": [3, 3, 4], "float_item": 2.0, "float_list": [2.0, 2.0, 3.0], "bytes_item": b"def", "bytes_list": [b"def", b"1234"], }, ] ) # Write the TFRecords. ds.write_tfrecords(tmp_path) # Read the TFRecords. readback_ds = ray.data.read_tfrecords(tmp_path) assert ds.take() == readback_ds.take() ``` ###Assistant : Test reading back TFRecords written using datasets. The dataset we read back should be the same that we wrote.
1,027
def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope("cell_%d" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( "Expected state to be a tuple of length " f"{len(self.state_size)}" f", but received: {state}" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] ) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else tf.concat(new_states, 1) ) return cur_inp, new_states
Run this multi-layer cell on inputs, starting from state.
9
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call(self, inputs, state): cur_state_pos = 0 cur_inp = inputs new_states = [] for i, cell in enumerate(self._cells): with tf.compat.v1.variable_scope("cell_%d" % i): if self._state_is_tuple: if not tf.nest.is_nested(state): raise ValueError( "Expected state to be a tuple of length " f"{len(self.state_size)}" f", but received: {state}" ) cur_state = state[i] else: cur_state = tf.slice( state, [0, cur_state_pos], [-1, cell.state_size] ) cur_state_pos += cell.state_size cur_inp, new_state = cell(cur_inp, cur_state) new_states.append(new_state) new_states = ( tuple(new_states) if self._state_is_tuple else tf.concat(new_states, 1) ) return cur_inp, new_states ``` ###Assistant : Run this multi-layer cell on inputs, starting from state.
1,028
def test_basic(push_channel): msgs = [ {"foo": "bar"}, {"bar": "baz"}, {"baz": "qux", "list": [1, 2, 3]}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) assert ret["load"] == msg
Test a variety of messages, make sure we get the expected responses
12
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_basic(push_channel): msgs = [ {"foo": "bar"}, {"bar": "baz"}, {"baz": "qux", "list": [1, 2, 3]}, ] for msg in msgs: ret = push_channel.send(msg, timeout=5, tries=1) assert ret["load"] == msg ``` ###Assistant : Test a variety of messages, make sure we get the expected responses
1,029
def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, "test.data", {"wombat": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_global_account_data_by_type_for_user( self.user_id, "test.data" ) ), {"wombat": True}, )
Tests that written account data using `put_global` can be read out again later.
13
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_put_global(self) -> None: self.get_success( self._module_api.account_data_manager.put_global( self.user_id, "test.data", {"wombat": True} ) ) # Request that account data from the normal store; check it's as we expect. self.assertEqual( self.get_success( self._store.get_global_account_data_by_type_for_user( self.user_id, "test.data" ) ), {"wombat": True}, ) ``` ###Assistant : Tests that written account data using `put_global` can be read out again later.
1,030
def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata)
# DO NOT EDIT: this file is generated from the specification found in the # following script to centralize the configuration for all Azure CI builds: # build_tools/azure/update_environments_and_lock_files.py channels: - {{ build_metadata['channel'] }} dependencies: {% for conda_dep in build_metadata['conda_dependencies'] %} - {{ conda_dep | get_package_with_constraint(build_metadata) }} {% endfor %} {% if build_metadata['pip_dependencies'] %} - pip - pip: {% for pip_dep in build_metadata.get('pip_dependencies', []) %} - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }} {% endfor %} {% endif %}
77
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conda_environment_content(build_metadata): template = environment.from_string( .strip() ) return template.render(build_metadata=build_metadata) ``` ###Assistant : # DO NOT EDIT: this file is generated from the specification found in the # following script to centralize the configuration for all Azure CI builds: # build_tools/azure/update_environments_and_lock_files.py channels: - {{ build_metadata['channel'] }} dependencies: {% for conda_dep in build_metadata['conda_dependencies'] %} - {{ conda_dep | get_package_with_constraint(build_metadata) }} {% endfor %} {% if build_metadata['pip_dependencies'] %} - pip - pip: {% for pip_dep in build_metadata.get('pip_dependencies', []) %} - {{ pip_dep | get_package_with_constraint(build_metadata, uses_pip=True) }} {% endfor %} {% endif %}
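A minimal Jinja2 example of the `Environment.from_string(...).render(...)` pattern the method above relies on. The project-specific `get_package_with_constraint` filter is omitted, and the template here is a simplified stand-in rather than the build-tools template itself.

```python
# Minimal illustration of the Environment.from_string(...).render(...) pattern.
from jinja2 import Environment

environment = Environment()
template = environment.from_string(
    """
channels:
  - {{ build_metadata['channel'] }}
dependencies:
{% for dep in build_metadata['conda_dependencies'] %}  - {{ dep }}
{% endfor %}""".strip()
)

build_metadata = {"channel": "conda-forge", "conda_dependencies": ["numpy", "scipy"]}
print(template.render(build_metadata=build_metadata))
```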
1,031
def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {"format": format} # Ensuring image is properly rotated if hasattr(image, "_getexif"): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs["icc_profile"] = image.info.get("icc_profile") if hasattr(self, "preprocess_%s" % format): image, addl_save_kwargs = getattr(self, "preprocess_%s" % format)( image=image ) save_kwargs.update(addl_save_kwargs) return image, save_kwargs
Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: image: a PIL Image instance image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}).
92
94
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preprocess(self, image, image_format): format = self.format or image_format save_kwargs = {"format": format} # Ensuring image is properly rotated if hasattr(image, "_getexif"): exif_datadict = image._getexif() # returns None if no EXIF data if exif_datadict is not None: exif = dict(exif_datadict.items()) orientation = exif.get(self.EXIF_ORIENTATION_KEY, None) if orientation == 3: image = image.transpose(Image.ROTATE_180) elif orientation == 6: image = image.transpose(Image.ROTATE_270) elif orientation == 8: image = image.transpose(Image.ROTATE_90) # Ensure any embedded ICC profile is preserved save_kwargs["icc_profile"] = image.info.get("icc_profile") if hasattr(self, "preprocess_%s" % format): image, addl_save_kwargs = getattr(self, "preprocess_%s" % format)( image=image ) save_kwargs.update(addl_save_kwargs) return image, save_kwargs ``` ###Assistant : Preprocess an image. An API hook for image pre-processing. Calls any image format specific pre-processors (if defined). I.E. If `image_format` is 'JPEG', this method will look for a method named `preprocess_JPEG`, if found `image` will be passed to it. Arguments: image: a PIL Image instance image_format: str, a valid PIL format (i.e. 'JPEG' or 'WEBP') Subclasses should return a 2-tuple: * [0]: A PIL Image instance. * [1]: A dictionary of additional keyword arguments to be used when the instance is saved. If no additional keyword arguments, return an empty dict ({}).
1,032
def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f"Package '{package}' does not exist or is not a package!") if len(pkg_paths) > 1: logger.warning( "get_package_paths - package %s has multiple paths (%r); returning only first one!", package, pkg_paths ) pkg_dir = pkg_paths[0] pkg_base = package_base_path(pkg_dir, package) return pkg_base, pkg_dir
Given a package, return the path to packages stored on this machine and also returns the path to this particular package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``. NOTE: due to backwards compatibility, this function returns only one package path along with its base directory. In case of PEP 420 namespace package with multiple location, only first location is returned. To obtain all package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the ``package_base_path`` helper.
84
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_package_paths(package): pkg_paths = get_all_package_paths(package) if not pkg_paths: raise ValueError(f"Package '{package}' does not exist or is not a package!") if len(pkg_paths) > 1: logger.warning( "get_package_paths - package %s has multiple paths (%r); returning only first one!", package, pkg_paths ) pkg_dir = pkg_paths[0] pkg_base = package_base_path(pkg_dir, package) return pkg_base, pkg_dir ``` ###Assistant : Given a package, return the path to packages stored on this machine and also returns the path to this particular package. For example, if pkg.subpkg lives in /abs/path/to/python/libs, then this function returns ``(/abs/path/to/python/libs, /abs/path/to/python/libs/pkg/subpkg)``. NOTE: due to backwards compatibility, this function returns only one package path along with its base directory. In case of PEP 420 namespace package with multiple location, only first location is returned. To obtain all package paths, use the ``get_all_package_paths`` function and obtain corresponding base directories using the ``package_base_path`` helper.
1,033
def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"]) for value in address_fields: territory = frappe.db.get_value("Territory", value) if territory: break return territory
Tries to match city, state and country of address to existing territory
12
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_address_territory(address_name): territory = None if address_name: address_fields = frappe.db.get_value("Address", address_name, ["city", "state", "country"]) for value in address_fields: territory = frappe.db.get_value("Territory", value) if territory: break return territory ``` ###Assistant : Tries to match city, state and country of address to existing territory
1,034
def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y
Return the X, Y coordinates of the top left corner for a device in the specified rack unit.
18
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_device_coords(self, position, height): x = self.legend_width + RACK_ELEVATION_BORDER_WIDTH y = RACK_ELEVATION_BORDER_WIDTH if self.rack.desc_units: y += int((position - 1) * self.unit_height) else: y += int((self.rack.u_height - position + 1) * self.unit_height) - int(height * self.unit_height) return x, y ``` ###Assistant : Return the X, Y coordinates of the top left corner for a device in the specified rack unit.
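A worked numeric sketch of the coordinate arithmetic above. The constants (legend width, border width, unit height, rack height) are illustrative assumptions, not NetBox's actual values.

```python
# Worked numeric sketch of the rack-unit to pixel-coordinate arithmetic above.
LEGEND_WIDTH = 30   # assumed value, for illustration only
BORDER_WIDTH = 2    # assumed value
UNIT_HEIGHT = 22    # assumed pixels per rack unit
RACK_U_HEIGHT = 42  # assumed rack height in units

def device_coords(position, height, desc_units=False):
    x = LEGEND_WIDTH + BORDER_WIDTH
    y = BORDER_WIDTH
    if desc_units:
        # Units numbered top-down: unit 1 sits at the top of the rack.
        y += int((position - 1) * UNIT_HEIGHT)
    else:
        # Units numbered bottom-up: convert to an offset from the top edge.
        y += int((RACK_U_HEIGHT - position + 1) * UNIT_HEIGHT) - int(height * UNIT_HEIGHT)
    return x, y

# A 2U device starting at U40 in a 42U rack, numbered bottom-up.
print(device_coords(position=40, height=2))  # (32, 24)
```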
1,035
def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd
Computes nonnegative integer greatest common divisor. Explanation =========== The algorithm is based on the well known Euclid's algorithm [1]_. To improve speed, ``igcd()`` has its own caching mechanism. Examples ======== >>> from sympy import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 References ========== .. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm
49
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def igcd(*args): if len(args) < 2: raise TypeError( 'igcd() takes at least 2 arguments (%s given)' % len(args)) args_temp = [abs(as_int(i)) for i in args] if 1 in args_temp: return 1 a = args_temp.pop() if HAS_GMPY: # Using gmpy if present to speed up. for b in args_temp: a = gmpy.gcd(a, b) if b else a return as_int(a) for b in args_temp: a = math.gcd(a, b) return a igcd2 = math.gcd ``` ###Assistant : Computes nonnegative integer greatest common divisor. Explanation =========== The algorithm is based on the well known Euclid's algorithm [1]_. To improve speed, ``igcd()`` has its own caching mechanism. Examples ======== >>> from sympy import igcd >>> igcd(2, 4) 2 >>> igcd(5, 10, 15) 5 References ========== .. [1] https://en.wikipedia.org/wiki/Euclidean_algorithm
1,036
def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.match.current: result = browsertab.SearchNavigationResult.wrapped_top else: result = browsertab.SearchNavigationResult.not_found callback(result)
Call the prev/next callback based on the search result.
9
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _prev_next_cb(self, found, *, going_up, callback): if found: result = browsertab.SearchNavigationResult.found # Check if the match count change is opposite to the search direction if self._old_match.current > 0: if not going_up and self._old_match.current > self.match.current: result = browsertab.SearchNavigationResult.wrapped_bottom elif going_up and self._old_match.current < self.match.current: result = browsertab.SearchNavigationResult.wrapped_top else: result = browsertab.SearchNavigationResult.not_found callback(result) ``` ###Assistant : Call the prev/next callback based on the search result.
1,037
def onModuleSourceCode(self, module_name, source_code): if module_name != "tensorflow": return source_code source_lines = source_code.splitlines() found_insert = False for i, l in enumerate(source_lines): if l.startswith("def ") and "_running_from_pip_package():" in l: source_lines.insert(i, "_site_packages_dirs = []") source_lines.insert(i, "from tensorflow.python import keras") found_insert = True break if found_insert is True: self.info("Patched 'running-from-pip' path magic.") else: self.sysexit("Did not find 'running-from-pip' path magic code.") return "\n".join(source_lines)
Neutralize some path magic in tensorflow. Notes: Make sure tensorflow understands, we are not running as a PIP installed application.
20
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def onModuleSourceCode(self, module_name, source_code): if module_name != "tensorflow": return source_code source_lines = source_code.splitlines() found_insert = False for i, l in enumerate(source_lines): if l.startswith("def ") and "_running_from_pip_package():" in l: source_lines.insert(i, "_site_packages_dirs = []") source_lines.insert(i, "from tensorflow.python import keras") found_insert = True break if found_insert is True: self.info("Patched 'running-from-pip' path magic.") else: self.sysexit("Did not find 'running-from-pip' path magic code.") return "\n".join(source_lines) ``` ###Assistant : Neutralize some path magic in tensorflow. Notes: Make sure tensorflow understands, we are not running as a PIP installed application.
1,038
def cast_scalar_indexer(val): # assumes lib.is_scalar(val) if lib.is_float(val) and val.is_integer(): raise IndexError( # GH#34193 "Indexing with a float is no longer supported. Manually convert " "to an integer key instead." ) return val
Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar
24
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def cast_scalar_indexer(val): # assumes lib.is_scalar(val) if lib.is_float(val) and val.is_integer(): raise IndexError( # GH#34193 "Indexing with a float is no longer supported. Manually convert " "to an integer key instead." ) return val ``` ###Assistant : Disallow indexing with a float key, even if that key is a round number. Parameters ---------- val : scalar Returns ------- outval : scalar
1,039
def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) for plugin in plugins: database[plugin] = plugins[plugin]
Load plugins of the specified type and track them in the specified database. Only plugins which have already been imported will be loaded.
23
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_plugins(base_type, database): # type: (t.Type[C], t.Dict[str, t.Type[C]]) -> None plugins: t.Dict[str, t.Type[C]] = dict((sc.__module__.rsplit('.', 1)[1], sc) for sc in get_subclasses(base_type)) for plugin in plugins: database[plugin] = plugins[plugin] ``` ###Assistant : Load plugins of the specified type and track them in the specified database. Only plugins which have already been imported will be loaded.
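A self-contained sketch of the subclass-discovery idea above. `get_subclasses` is our own helper here, and because everything lives in one file the database is keyed by class name rather than by defining module as in the original.

```Python
from typing import Dict, Type

class Provider:
    """Hypothetical plugin base type."""

def get_subclasses(base):
    # Walk the subclass tree; only classes that have already been imported show up.
    found, stack = set(), [base]
    while stack:
        for sub in stack.pop().__subclasses__():
            if sub not in found:
                found.add(sub)
                stack.append(sub)
    return found

class DockerProvider(Provider):
    pass

class VenvProvider(Provider):
    pass

database: Dict[str, Type[Provider]] = {}
for plugin_cls in get_subclasses(Provider):
    database[plugin_cls.__name__.lower()] = plugin_cls

print(sorted(database))  # ['dockerprovider', 'venvprovider']
```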
1,040
def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {}
Returns a trial function match if undetermined coefficients can be applied to ``expr``, and ``None`` otherwise. A trial expression can be found for an expression for use with the method of undetermined coefficients if the expression is an additive/multiplicative combination of constants, polynomials in `x` (the independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and `e^{a x}` terms (in other words, it has a finite number of linearly independent derivatives). Note that you may still need to multiply each term returned here by sufficient `x` to make it linearly independent with the solutions to the homogeneous equation. This is intended for internal use by ``undetermined_coefficients`` hints. SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So, for example, you will need to manually convert `\sin^2(x)` into `[1 + \cos(2 x)]/2` to properly apply the method of undetermined coefficients on it. Examples ======== >>> from sympy import log, exp >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match >>> from sympy.abc import x >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}} >>> _undetermined_coefficients_match(log(x), x) {'test': False}
194
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _undetermined_coefficients_match(expr, x, func=None, eq_homogeneous=S.Zero): r a = Wild('a', exclude=[x]) b = Wild('b', exclude=[x]) expr = powsimp(expr, combine='exp') # exp(x)*exp(2*x + 1) => exp(3*x + 1) retdict = {} ``` ###Assistant : Returns a trial function match if undetermined coefficients can be applied to ``expr``, and ``None`` otherwise. A trial expression can be found for an expression for use with the method of undetermined coefficients if the expression is an additive/multiplicative combination of constants, polynomials in `x` (the independent variable of expr), `\sin(a x + b)`, `\cos(a x + b)`, and `e^{a x}` terms (in other words, it has a finite number of linearly independent derivatives). Note that you may still need to multiply each term returned here by sufficient `x` to make it linearly independent with the solutions to the homogeneous equation. This is intended for internal use by ``undetermined_coefficients`` hints. SymPy currently has no way to convert `\sin^n(x) \cos^m(y)` into a sum of only `\sin(a x)` and `\cos(b x)` terms, so these are not implemented. So, for example, you will need to manually convert `\sin^2(x)` into `[1 + \cos(2 x)]/2` to properly apply the method of undetermined coefficients on it. Examples ======== >>> from sympy import log, exp >>> from sympy.solvers.ode.nonhomogeneous import _undetermined_coefficients_match >>> from sympy.abc import x >>> _undetermined_coefficients_match(9*x*exp(x) + exp(-x), x) {'test': True, 'trialset': {x*exp(x), exp(-x), exp(x)}} >>> _undetermined_coefficients_match(log(x), x) {'test': False}
1,041
def _remove_raw(self) -> None: if "raw" in self._selections: return logger.debug("Removing Raw Data from output") for key in list(self._stats.keys()): if key.startswith("raw"): del self._stats[key] logger.debug("Removed Raw Data from output")
Remove raw values from :attr:`stats` if they are not requested.
10
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _remove_raw(self) -> None: if "raw" in self._selections: return logger.debug("Removing Raw Data from output") for key in list(self._stats.keys()): if key.startswith("raw"): del self._stats[key] logger.debug("Removed Raw Data from output") ``` ###Assistant : Remove raw values from :attr:`stats` if they are not requested.
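A toy version of the same filtering over a plain dict; the stats keys and the selection values below are invented for the demonstration.

```Python
stats = {"raw_fps": [25, 31], "rate_total": 28.1, "raw_loss": [0.20, 0.18]}
selections = ["rate"]                # "raw" was not requested

if "raw" not in selections:
    # Iterate over a copy of the keys so deletion is safe during the loop.
    for key in list(stats):
        if key.startswith("raw"):
            del stats[key]

print(stats)  # {'rate_total': 28.1}
```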
1,042
def get_keras_blocks(keras_weight_names): # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a' keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x} return sorted(keras_blocks)
Extract the block names from list of full weight names.
10
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_keras_blocks(keras_weight_names): # example: 'block1a_dwconv/depthwise_kernel:0' -> 'block1a' keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x} return sorted(keras_blocks) ``` ###Assistant : Extract the block names from list of full weight names.
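The same helper exercised on made-up EfficientNet-style weight names; entries without "block" in them (the stem, for instance) are dropped and the result comes back sorted.

```Python
def get_keras_blocks(keras_weight_names):
    keras_blocks = {x.split("_")[0] for x in keras_weight_names if "block" in x}
    return sorted(keras_blocks)

names = [
    "stem_conv/kernel:0",
    "block1a_dwconv/depthwise_kernel:0",
    "block2b_project_conv/kernel:0",
    "block1a_se_reduce/bias:0",
]
print(get_keras_blocks(names))  # ['block1a', 'block2b']
```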
1,043
def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice" companies = frappe.get_all( doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, fields=["company"] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ "docstatus": 1, party_type.lower(): party, "posting_date": ( "between", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by="company", fields=[ "company", "sum(grand_total) as grand_total", "sum(base_grand_total) as base_grand_total", ], ) loyalty_point_details = [] if party_type == "Customer": loyalty_point_details = frappe._dict( frappe.get_all( "Loyalty Point Entry", filters={ "customer": party, "expiry_date": (">=", getdate()), }, group_by="company", fields=["company", "sum(loyalty_points) as loyalty_points"], as_list=1, ) ) company_wise_billing_this_year = frappe._dict() for d in company_wise_grand_total: company_wise_billing_this_year.setdefault( d.company, {"grand_total": d.grand_total, "base_grand_total": d.base_grand_total} ) company_wise_total_unpaid = frappe._dict( frappe.db.sql( , (party_type, party), ) ) for d in companies: company_default_currency = frappe.db.get_value("Company", d.company, "default_currency") party_account_currency = get_party_account_currency(party_type, party, d.company) if party_account_currency == company_default_currency: billing_this_year = flt( company_wise_billing_this_year.get(d.company, {}).get("base_grand_total") ) else: billing_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get("grand_total")) total_unpaid = flt(company_wise_total_unpaid.get(d.company)) if loyalty_point_details: loyalty_points = loyalty_point_details.get(d.company) info = {} info["billing_this_year"] = flt(billing_this_year) if billing_this_year else 0 info["currency"] = party_account_currency info["total_unpaid"] = flt(total_unpaid) if total_unpaid else 0 info["company"] = d.company if party_type == "Customer" and loyalty_point_details: info["loyalty_points"] = loyalty_points if party_type == "Supplier": info["total_unpaid"] = -1 * info["total_unpaid"] company_wise_info.append(info) return company_wise_info
select company, sum(debit_in_account_currency) - sum(credit_in_account_currency) from `tabGL Entry` where party_type = %s and party=%s and is_cancelled = 0 group by company
21
193
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_dashboard_info(party_type, party, loyalty_program=None): current_fiscal_year = get_fiscal_year(nowdate(), as_dict=True) doctype = "Sales Invoice" if party_type == "Customer" else "Purchase Invoice" companies = frappe.get_all( doctype, filters={"docstatus": 1, party_type.lower(): party}, distinct=1, fields=["company"] ) company_wise_info = [] company_wise_grand_total = frappe.get_all( doctype, filters={ "docstatus": 1, party_type.lower(): party, "posting_date": ( "between", [current_fiscal_year.year_start_date, current_fiscal_year.year_end_date], ), }, group_by="company", fields=[ "company", "sum(grand_total) as grand_total", "sum(base_grand_total) as base_grand_total", ], ) loyalty_point_details = [] if party_type == "Customer": loyalty_point_details = frappe._dict( frappe.get_all( "Loyalty Point Entry", filters={ "customer": party, "expiry_date": (">=", getdate()), }, group_by="company", fields=["company", "sum(loyalty_points) as loyalty_points"], as_list=1, ) ) company_wise_billing_this_year = frappe._dict() for d in company_wise_grand_total: company_wise_billing_this_year.setdefault( d.company, {"grand_total": d.grand_total, "base_grand_total": d.base_grand_total} ) company_wise_total_unpaid = frappe._dict( frappe.db.sql( , (party_type, party), ) ) for d in companies: company_default_currency = frappe.db.get_value("Company", d.company, "default_currency") party_account_currency = get_party_account_currency(party_type, party, d.company) if party_account_currency == company_default_currency: billing_this_year = flt( company_wise_billing_this_year.get(d.company, {}).get("base_grand_total") ) else: billing_this_year = flt(company_wise_billing_this_year.get(d.company, {}).get("grand_total")) total_unpaid = flt(company_wise_total_unpaid.get(d.company)) if loyalty_point_details: loyalty_points = loyalty_point_details.get(d.company) info = {} info["billing_this_year"] = flt(billing_this_year) if billing_this_year else 0 info["currency"] = party_account_currency info["total_unpaid"] = flt(total_unpaid) if total_unpaid else 0 info["company"] = d.company if party_type == "Customer" and loyalty_point_details: info["loyalty_points"] = loyalty_points if party_type == "Supplier": info["total_unpaid"] = -1 * info["total_unpaid"] company_wise_info.append(info) return company_wise_info ``` ###Assistant : select company, sum(debit_in_account_currency) - sum(credit_in_account_currency) from `tabGL Entry` where party_type = %s and party=%s and is_cancelled = 0 group by company
1,044
def print_help(self): help_text = console.print(text=help_text, menu="Portfolio - Brokers - Robinhood")
Print help[cmds] login login to robinhood holdings show account holdings in stocks history show equity history of your account [/cmds]
20
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def print_help(self): help_text = console.print(text=help_text, menu="Portfolio - Brokers - Robinhood") ``` ###Assistant : Print help[cmds] login login to robinhood holdings show account holdings in stocks history show equity history of your account [/cmds]
1,045
def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel("first_name"), FieldPanel("last_name"), ] warning = checks.Warning( "EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_checks_result() self.assertIn(warning, checks_results) delattr(EventPageSpeaker, "settings_panels")
Test that checks will warn against setting single tabbed panel on InlinePanel model. Ensure that EventPageSpeaker uses `panels` instead of `settings_panels`. There are no tabs on non-Page model editing within InlinePanels.
30
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_page_with_inline_model_with_tabbed_panel_only(self): EventPageSpeaker.settings_panels = [ FieldPanel("first_name"), FieldPanel("last_name"), ] warning = checks.Warning( "EventPageSpeaker.settings_panels will have no effect on InlinePanel model editing", hint=, obj=EventPageSpeaker, id=self.warning_id, ) checks_results = self.get_checks_result() self.assertIn(warning, checks_results) delattr(EventPageSpeaker, "settings_panels") ``` ###Assistant : Test that checks will warn against setting single tabbed panel on InlinePanel model. Ensure that EventPageSpeaker uses `panels` instead of `settings_panels`. There are no tabs on non-Page model editing within InlinePanels.
1,046
def to_kwargs(self): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} @dataclass
Returns a dictionary containing the attributes with values different from the default of this class.
15
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_kwargs(self): default_dict = self.__class__().to_dict() this_dict = self.to_dict() return {k: v for k, v in this_dict.items() if default_dict[k] != v} @dataclass ``` ###Assistant : Returns a dictionary containing the attributes with values different from the default of this class.
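An illustrative stand-in for the diff-against-defaults idea above: the original class supplies its own `to_dict`, so the `dataclasses.asdict`-based version and the field names below are assumptions made only for the example.

```Python
from dataclasses import dataclass, asdict

@dataclass
class TrainingArgs:
    lr: float = 1e-3
    batch_size: int = 32
    seed: int = 0

    def to_dict(self):
        return asdict(self)

    def to_kwargs(self):
        # Keep only the attributes whose values differ from a freshly built default.
        default_dict = self.__class__().to_dict()
        this_dict = self.to_dict()
        return {k: v for k, v in this_dict.items() if default_dict[k] != v}

args = TrainingArgs(lr=5e-4)
print(args.to_kwargs())  # {'lr': 0.0005} -- only the non-default field survives
```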
1,047
def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f"No parser found for mime type {mime_type}, cannot " f"archive document {document} (ID: {document_id})", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), "rb") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. # We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename( document, archive_filename=True, ) Document.objects.filter(pk=document.pk).update( archive_checksum=checksum, content=parser.get_text(), archive_filename=document.archive_filename, ) with FileLock(settings.MEDIA_LOCK): create_source_path_directory(document.archive_path) shutil.move(parser.get_archive_path(), document.archive_path) shutil.move(thumbnail, document.thumbnail_path) with index.open_index_writer() as writer: index.update_document(writer, document) except Exception: logger.exception( f"Error while parsing document {document} " f"(ID: {document_id})", ) finally: parser.cleanup()
Re-creates the archive file of a document, including new OCR content and thumbnail
13
141
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update_document_archive_file(document_id): document = Document.objects.get(id=document_id) mime_type = document.mime_type parser_class: Type[DocumentParser] = get_parser_class_for_mime_type(mime_type) if not parser_class: logger.error( f"No parser found for mime type {mime_type}, cannot " f"archive document {document} (ID: {document_id})", ) return parser: DocumentParser = parser_class(logging_group=uuid.uuid4()) try: parser.parse(document.source_path, mime_type, document.get_public_filename()) thumbnail = parser.get_thumbnail( document.source_path, mime_type, document.get_public_filename(), ) if parser.get_archive_path(): with transaction.atomic(): with open(parser.get_archive_path(), "rb") as f: checksum = hashlib.md5(f.read()).hexdigest() # I'm going to save first so that in case the file move # fails, the database is rolled back. # We also don't use save() since that triggers the filehandling # logic, and we don't want that yet (file not yet in place) document.archive_filename = generate_unique_filename( document, archive_filename=True, ) Document.objects.filter(pk=document.pk).update( archive_checksum=checksum, content=parser.get_text(), archive_filename=document.archive_filename, ) with FileLock(settings.MEDIA_LOCK): create_source_path_directory(document.archive_path) shutil.move(parser.get_archive_path(), document.archive_path) shutil.move(thumbnail, document.thumbnail_path) with index.open_index_writer() as writer: index.update_document(writer, document) except Exception: logger.exception( f"Error while parsing document {document} " f"(ID: {document_id})", ) finally: parser.cleanup() ``` ###Assistant : Re-creates the archive file of a document, including new OCR content and thumbnail
1,048
def execute(): frappe.reload_doc("Selling", "doctype", "Customer Credit Limit") frappe.reload_doc("Selling", "doctype", "Customer") frappe.reload_doc("Setup", "doctype", "Customer Group") if frappe.db.a_row_exists("Customer Credit Limit"): return move_credit_limit_to_child_table()
Move credit limit and bypass credit limit to the child table of customer credit limit
15
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def execute(): frappe.reload_doc("Selling", "doctype", "Customer Credit Limit") frappe.reload_doc("Selling", "doctype", "Customer") frappe.reload_doc("Setup", "doctype", "Customer Group") if frappe.db.a_row_exists("Customer Credit Limit"): return move_credit_limit_to_child_table() ``` ###Assistant : Move credit limit and bypass credit limit to the child table of customer credit limit
1,049
def forceexit(self, tradeid, ordertype=None, amount=None): return self._post("forceexit", data={ "tradeid": tradeid, "ordertype": ordertype, "amount": amount, })
Force-exit a trade. :param tradeid: Id of the trade (can be received via status command) :param ordertype: Order type to use (must be market or limit) :param amount: Amount to sell. Full sell if not given :return: json object
39
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def forceexit(self, tradeid, ordertype=None, amount=None): return self._post("forceexit", data={ "tradeid": tradeid, "ordertype": ordertype, "amount": amount, }) ``` ###Assistant : Force-exit a trade. :param tradeid: Id of the trade (can be received via status command) :param ordertype: Order type to use (must be market or limit) :param amount: Amount to sell. Full sell if not given :return: json object
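A runnable stand-in showing the payload the method builds; `DummyClient._post` just echoes its arguments here, whereas the real client posts them to the bot's REST API.

```Python
class DummyClient:
    def _post(self, endpoint, data=None):
        # Stand-in for the HTTP call so the example runs without a live bot.
        return {"endpoint": endpoint, "data": data}

    def forceexit(self, tradeid, ordertype=None, amount=None):
        return self._post("forceexit", data={
            "tradeid": tradeid,
            "ordertype": ordertype,
            "amount": amount,
        })

print(DummyClient().forceexit("3", ordertype="market"))
# {'endpoint': 'forceexit', 'data': {'tradeid': '3', 'ordertype': 'market', 'amount': None}}
```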
1,050
def _abi3_applies(python_version): # type: (PythonVersion) -> bool return len(python_version) > 1 and tuple(python_version) >= (3, 2)
Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2.
15
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _abi3_applies(python_version): # type: (PythonVersion) -> bool return len(python_version) > 1 and tuple(python_version) >= (3, 2) ``` ###Assistant : Determine if the Python version supports abi3. PEP 384 was first implemented in Python 3.2.
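The same check exercised against a few version tuples; abi3 only exists for CPython 3.2 and newer, and a bare major version (length 1) never qualifies.

```Python
def _abi3_applies(python_version):
    return len(python_version) > 1 and tuple(python_version) >= (3, 2)

print(_abi3_applies((3, 11)))  # True
print(_abi3_applies((3, 1)))   # False -- predates PEP 384
print(_abi3_applies((2, 7)))   # False
print(_abi3_applies((3,)))     # False -- only the major version is known
```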
1,051
def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, "tocoo"): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, "shape"): v._keras_shape = int_shape(value) track_variable(v) return v
Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy= array([[1., 2.], [3., 4.]])>
77
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def variable(value, dtype=None, name=None, constraint=None): if dtype is None: dtype = floatx() if hasattr(value, "tocoo"): sparse_coo = value.tocoo() indices = np.concatenate( ( np.expand_dims(sparse_coo.row, 1), np.expand_dims(sparse_coo.col, 1), ), 1, ) v = tf.SparseTensor( indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape, ) v._keras_shape = sparse_coo.shape return v v = tf.Variable( value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint ) if isinstance(value, np.ndarray): v._keras_shape = value.shape elif hasattr(value, "shape"): v._keras_shape = int_shape(value) track_variable(v) return v ``` ###Assistant : Instantiates a variable and returns it. Args: value: Numpy array, initial value of the tensor. dtype: Tensor type. name: Optional name string for the tensor. constraint: Optional projection function to be applied to the variable after an optimizer update. Returns: A variable instance (with Keras metadata included). Examples: >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val, dtype='float64', ... name='example_var') >>> tf.keras.backend.dtype(kvar) 'float64' >>> print(kvar) <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy= array([[1., 2.], [3., 4.]])>
1,052
def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock") flo = BytesIO() ftp.retrbinary("RETR usa.txt", flo.write) flo.seek(0) data = pd.read_csv(flo, sep="|", skiprows=1) data = data[["#SYM", "FEERATE", "AVAILABLE"]] data["AVAILABLE"] = data["AVAILABLE"].replace(">10000000", 10000000) data.fillna(0, inplace=True) data["AVAILABLE"] = data["AVAILABLE"].astype(int) data.sort_values(by=["FEERATE"], ascending=False, inplace=True) data["FEERATE"] = data["FEERATE"].apply(lambda x: str(x) + "%") data.columns = ["Symbol", "Fees", "Available"] return data
Get stocks with highest cost to borrow [Source: Interactive Broker] Returns ------- pd.DataFrame Cost to borrow
16
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_cost_to_borrow() -> pd.DataFrame: ftp = ftplib.FTP("ftp3.interactivebrokers.com", "shortstock") flo = BytesIO() ftp.retrbinary("RETR usa.txt", flo.write) flo.seek(0) data = pd.read_csv(flo, sep="|", skiprows=1) data = data[["#SYM", "FEERATE", "AVAILABLE"]] data["AVAILABLE"] = data["AVAILABLE"].replace(">10000000", 10000000) data.fillna(0, inplace=True) data["AVAILABLE"] = data["AVAILABLE"].astype(int) data.sort_values(by=["FEERATE"], ascending=False, inplace=True) data["FEERATE"] = data["FEERATE"].apply(lambda x: str(x) + "%") data.columns = ["Symbol", "Fees", "Available"] return data ``` ###Assistant : Get stocks with highest cost to borrow [Source: Interactive Broker] Returns ------- pd.DataFrame Cost to borrow
1,053
def _enable_task_listeners(): if get_listener_manager().has_listeners: register_task_instance_state_events()
Check if we have any registered listeners, then register sqlalchemy hooks for TI state change if we do.
18
5
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _enable_task_listeners(): if get_listener_manager().has_listeners: register_task_instance_state_events() ``` ###Assistant : Check if we have any registered listeners, then register sqlalchemy hooks for TI state change if we do.
1,054
def has_computed_output(self) -> bool: return self._snapshot_blocks is not None and not self._stages_after_snapshot
Whether this plan has a computed snapshot for the final stage, i.e. for the output of this plan.
18
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def has_computed_output(self) -> bool: return self._snapshot_blocks is not None and not self._stages_after_snapshot ``` ###Assistant : Whether this plan has a computed snapshot for the final stage, i.e. for the output of this plan.
1,055
def get_conditions(filters): conditions = "" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get("company"): conditions += " and company=%(company)s" if filters.get("customer") and "customer" not in accounting_dimensions_list: conditions += " and customer = %(customer)s" if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s" if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s" if filters.get("owner"): conditions += " and owner = %(owner)s" def get_sales_invoice_item_field_condition(field, table="Sales Invoice Item") -> str: if not filters.get(field) or field in accounting_dimensions_list: return "" return f conditions += get_sales_invoice_item_field_condition("mode_of_payments", "Sales Invoice Payment") conditions += get_sales_invoice_item_field_condition("cost_center") conditions += get_sales_invoice_item_field_condition("warehouse") conditions += get_sales_invoice_item_field_condition("brand") conditions += get_sales_invoice_item_field_condition("item_group") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) else: conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) return conditions
and exists(select name from `tab{table}` where parent=`tabSales Invoice`.name and ifnull(`tab{table}`.{field}, '') = %({field})s) and exists(select name from `tabSales Invoice Item` where parent=`tabSales Invoice`.name
23
150
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conditions(filters): conditions = "" accounting_dimensions = get_accounting_dimensions(as_list=False) or [] accounting_dimensions_list = [d.fieldname for d in accounting_dimensions] if filters.get("company"): conditions += " and company=%(company)s" if filters.get("customer") and "customer" not in accounting_dimensions_list: conditions += " and customer = %(customer)s" if filters.get("from_date"): conditions += " and posting_date >= %(from_date)s" if filters.get("to_date"): conditions += " and posting_date <= %(to_date)s" if filters.get("owner"): conditions += " and owner = %(owner)s" def get_sales_invoice_item_field_condition(field, table="Sales Invoice Item") -> str: if not filters.get(field) or field in accounting_dimensions_list: return "" return f conditions += get_sales_invoice_item_field_condition("mode_of_payments", "Sales Invoice Payment") conditions += get_sales_invoice_item_field_condition("cost_center") conditions += get_sales_invoice_item_field_condition("warehouse") conditions += get_sales_invoice_item_field_condition("brand") conditions += get_sales_invoice_item_field_condition("item_group") if accounting_dimensions: common_condition = for dimension in accounting_dimensions: if filters.get(dimension.fieldname): if frappe.get_cached_value("DocType", dimension.document_type, "is_tree"): filters[dimension.fieldname] = get_dimension_with_children( dimension.document_type, filters.get(dimension.fieldname) ) conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) else: conditions += ( common_condition + "and ifnull(`tabSales Invoice Item`.{0}, '') in %({0})s)".format(dimension.fieldname) ) return conditions ``` ###Assistant : and exists(select name from `tab{table}` where parent=`tabSales Invoice`.name and ifnull(`tab{table}`.{field}, '') = %({field})s) and exists(select name from `tabSales Invoice Item` where parent=`tabSales Invoice`.name
1,056
def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight): import sklearn if not _is_plotting_supported(): return []
Draw and record various common artifacts for classifier For all classifiers, we always log: (1) confusion matrix: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html For only binary classifiers, we will log: (2) precision recall curve: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html (3) roc curve: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html Steps: 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput) 3. return a list of artifacts path to be logged :param fitted_estimator: The already fitted regressor :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :return: List of artifacts to be logged
117
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_classifier_artifacts(fitted_estimator, prefix, X, y_true, sample_weight): import sklearn if not _is_plotting_supported(): return [] ``` ###Assistant : Draw and record various common artifacts for classifier For all classifiers, we always log: (1) confusion matrix: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_confusion_matrix.html For only binary classifiers, we will log: (2) precision recall curve: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.plot_precision_recall_curve.html (3) roc curve: https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html Steps: 1. Extract X and y_true from fit_args and fit_kwargs, and split into train & test datasets. 2. If the sample_weight argument exists in fit_func (accuracy_score by default has sample_weight), extract it from fit_args or fit_kwargs as (y_true, y_pred, sample_weight, multioutput), otherwise as (y_true, y_pred, multioutput) 3. return a list of artifacts path to be logged :param fitted_estimator: The already fitted regressor :param fit_args: Positional arguments given to fit_func. :param fit_kwargs: Keyword arguments given to fit_func. :return: List of artifacts to be logged
1,057
def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead." ) if nswap > max_tries: raise nx.NetworkXError("Number of swaps > number of tries allowed.") if len(G) < 4: raise nx.NetworkXError("Graph has less than four nodes.") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. n = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # if random.random() < 0.5: continue # trick to avoid periodicities? # pick two random edges without creating edge list # choose source node indices from discrete distribution (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ui == xi: continue # same source, skip u = keys[ui] # convert index to label x = keys[xi] # choose target uniformly from neighbors v = seed.choice(list(G[u])) y = seed.choice(list(G[x])) if v == y: continue # same target, skip if (x not in G[u]) and (y not in G[v]): # don't create parallel edges G.add_edge(u, x) G.add_edge(v, y) G.remove_edge(u, v) G.remove_edge(x, y) swapcount += 1 if n >= max_tries: e = ( f"Maximum number of swap attempts ({n}) exceeded " f"before desired swaps achieved ({nswap})." ) raise nx.NetworkXAlgorithmError(e) n += 1 return G @py_random_state(3)
Swap two edges in the graph while keeping the node degrees fixed. A double-edge swap removes two randomly chosen edges u-v and x-y and creates the new edges u-x and v-y:: u--v u v becomes | | x--y x y If either the edge u-x or v-y already exist no swap is performed and another attempt is made to find a suitable edge pair. Parameters ---------- G : graph An undirected graph nswap : integer (optional, default=1) Number of double-edge swaps to perform max_tries : integer (optional) Maximum number of attempts to swap edges seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness<randomness>`. Returns ------- G : graph The graph after double edge swaps. Notes ----- Does not enforce any connectivity constraints. The graph G is modified in place.
135
228
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def double_edge_swap(G, nswap=1, max_tries=100, seed=None): if G.is_directed(): raise nx.NetworkXError( "double_edge_swap() not defined for directed graphs. Use directed_edge_swap instead." ) if nswap > max_tries: raise nx.NetworkXError("Number of swaps > number of tries allowed.") if len(G) < 4: raise nx.NetworkXError("Graph has less than four nodes.") # Instead of choosing uniformly at random from a generated edge list, # this algorithm chooses nonuniformly from the set of nodes with # probability weighted by degree. n = 0 swapcount = 0 keys, degrees = zip(*G.degree()) # keys, degree cdf = nx.utils.cumulative_distribution(degrees) # cdf of degree discrete_sequence = nx.utils.discrete_sequence while swapcount < nswap: # if random.random() < 0.5: continue # trick to avoid periodicities? # pick two random edges without creating edge list # choose source node indices from discrete distribution (ui, xi) = discrete_sequence(2, cdistribution=cdf, seed=seed) if ui == xi: continue # same source, skip u = keys[ui] # convert index to label x = keys[xi] # choose target uniformly from neighbors v = seed.choice(list(G[u])) y = seed.choice(list(G[x])) if v == y: continue # same target, skip if (x not in G[u]) and (y not in G[v]): # don't create parallel edges G.add_edge(u, x) G.add_edge(v, y) G.remove_edge(u, v) G.remove_edge(x, y) swapcount += 1 if n >= max_tries: e = ( f"Maximum number of swap attempts ({n}) exceeded " f"before desired swaps achieved ({nswap})." ) raise nx.NetworkXAlgorithmError(e) n += 1 return G @py_random_state(3) ``` ###Assistant : Swap two edges in the graph while keeping the node degrees fixed. A double-edge swap removes two randomly chosen edges u-v and x-y and creates the new edges u-x and v-y:: u--v u v becomes | | x--y x y If either the edge u-x or v-y already exist no swap is performed and another attempt is made to find a suitable edge pair. Parameters ---------- G : graph An undirected graph nswap : integer (optional, default=1) Number of double-edge swaps to perform max_tries : integer (optional) Maximum number of attempts to swap edges seed : integer, random_state, or None (default) Indicator of random number generation state. See :ref:`Randomness<randomness>`. Returns ------- G : graph The graph after double edge swaps. Notes ----- Does not enforce any connectivity constraints. The graph G is modified in place.
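A small demonstration with the networkx implementation of the same routine; the graph choice and seed are arbitrary, and the point is that every node keeps its original degree after the in-place swaps.

```Python
import networkx as nx

G = nx.cycle_graph(8)                          # 8 nodes, every degree is 2
before = sorted(d for _, d in G.degree())
nx.double_edge_swap(G, nswap=3, max_tries=100, seed=42)
after = sorted(d for _, d in G.degree())
print(before == after)                         # True -- the degree sequence is preserved
```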
1,058
def test_dataset_shard_with_task_parallelization(self): config = { "input": "dataset", "input_config": { "format": "json", "paths": self.dset_path, "parallelism": 10, }, } NUM_WORKERS = 4 _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS) assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] )
Tests whether the dataset_shard function works correctly with parallelism for reading the dataset.
13
44
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_dataset_shard_with_task_parallelization(self): config = { "input": "dataset", "input_config": { "format": "json", "paths": self.dset_path, "parallelism": 10, }, } NUM_WORKERS = 4 _, shards = get_dataset_and_shards(config, num_workers=NUM_WORKERS) assert len(shards) == NUM_WORKERS + 1 assert shards[0] is None assert all( isinstance(remote_shard, ray.data.Dataset) for remote_shard in shards[1:] ) ``` ###Assistant : Tests whether the dataset_shard function works correctly with parallelism for reading the dataset.
1,059
def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy")) # Copy both "baseline" and "test" image to result_dir, so that 1) # compare_images writes the diff to result_dir, rather than to the source # tree and 2) the baseline image doesn't appear missing to triage_tests.py. uid = str(uuid.uuid4()) result_im1 = make_test_filename(result_dir / (uid + im1), "expected") shutil.copyfile(baseline_dir / im1, result_im1) result_im2 = result_dir / im1 shutil.copyfile(baseline_dir / im2, result_im2) results = compare_images( result_im1, result_im2, tol=tol, in_decorator=True) if expect_rms is None: assert results is None else: assert results is not None assert results['rms'] == approx(expect_rms, abs=1e-4)
Compare two images, expecting a particular RMS error. im1 and im2 are filenames relative to the baseline_dir directory. tol is the tolerance to pass to compare_images. expect_rms is the expected RMS value, or None. If None, the test will succeed if compare_images succeeds. Otherwise, the test will succeed if compare_images fails and returns an RMS error almost equal to this value.
61
97
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_image_comparison_expect_rms(im1, im2, tol, expect_rms): baseline_dir, result_dir = map(Path, _image_directories(lambda: "dummy")) # Copy both "baseline" and "test" image to result_dir, so that 1) # compare_images writes the diff to result_dir, rather than to the source # tree and 2) the baseline image doesn't appear missing to triage_tests.py. uid = str(uuid.uuid4()) result_im1 = make_test_filename(result_dir / (uid + im1), "expected") shutil.copyfile(baseline_dir / im1, result_im1) result_im2 = result_dir / im1 shutil.copyfile(baseline_dir / im2, result_im2) results = compare_images( result_im1, result_im2, tol=tol, in_decorator=True) if expect_rms is None: assert results is None else: assert results is not None assert results['rms'] == approx(expect_rms, abs=1e-4) ``` ###Assistant : Compare two images, expecting a particular RMS error. im1 and im2 are filenames relative to the baseline_dir directory. tol is the tolerance to pass to compare_images. expect_rms is the expected RMS value, or None. If None, the test will succeed if compare_images succeeds. Otherwise, the test will succeed if compare_images fails and returns an RMS error almost equal to this value.
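A runnable sketch of the underlying comparison call: we render the same tiny figure twice (the plot and file names are our own) and check that `matplotlib.testing.compare.compare_images` reports a match; with `in_decorator=True` a mismatch would instead come back as a dict containing 'rms'.

```Python
import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
from matplotlib.testing.compare import compare_images

for name in ("expected.png", "actual.png"):
    fig, ax = plt.subplots()
    ax.plot([0, 1, 2], [0, 1, 4])
    fig.savefig(name, dpi=80)
    plt.close(fig)

# None means the images match within tol; a dict with 'rms' means they differ.
print(compare_images("expected.png", "actual.png", tol=0, in_decorator=True))
```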
1,060
def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt") buffer = io.BytesIO() torch.jit.save(module_v2, buffer) buffer.seek(0) module_reserialized = torch.jit.load(buffer) subcmul_nodes = sum("subcmul" in n.kind() for n in module_reserialized.graph.nodes()) self.assertEqual(subcmul_nodes, 0)
Tests that loading and saving serialized Torchscript with a versioned symbol won't persist the original function and will inline the versioned builtin.
22
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_versioned_symbols_reserialization(self): module_v2 = torch.jit.load(pytorch_test_dir + "/jit/fixtures/_test_serialization_subcmul_v2.pt") buffer = io.BytesIO() torch.jit.save(module_v2, buffer) buffer.seek(0) module_reserialized = torch.jit.load(buffer) subcmul_nodes = sum("subcmul" in n.kind() for n in module_reserialized.graph.nodes()) self.assertEqual(subcmul_nodes, 0) ``` ###Assistant : Tests that loading and saving serialized Torchscript with a versioned symbol won't persist the original function and will inline the versioned builtin.
1,061
def test_stylesheet_apply_takes_final_rule_in_specificity_clash(): css = ".a {background: red; color: lime;} .b {background: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b", id="c") stylesheet.apply(node) assert node.styles.color == Color(0, 255, 0) # color: lime assert node.styles.background == Color(0, 0, 255) # background: blue
.a and .b both contain background and have same specificity, so .b wins since it was declared last - the background should be blue.
24
39
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stylesheet_apply_takes_final_rule_in_specificity_clash(): css = ".a {background: red; color: lime;} .b {background: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b", id="c") stylesheet.apply(node) assert node.styles.color == Color(0, 255, 0) # color: lime assert node.styles.background == Color(0, 0, 255) # background: blue ``` ###Assistant : .a and .b both contain background and have same specificity, so .b wins since it was declared last - the background should be blue.
1,062
def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_register_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['email'])
Apply Update migration for FAB tables to add missing constraints
10
116
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): conn = op.get_bind() if conn.dialect.name == 'sqlite': op.execute('PRAGMA foreign_keys=OFF') with op.batch_alter_table('ab_view_menu', schema=None) as batch_op: batch_op.create_unique_constraint(batch_op.f('ab_view_menu_name_uq'), ['name']) op.execute('PRAGMA foreign_keys=ON') elif conn.dialect.name == 'mysql': with op.batch_alter_table('ab_register_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user', schema=None) as batch_op: batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) elif conn.dialect.name == 'mssql': with op.batch_alter_table('ab_register_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_register_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) with op.batch_alter_table('ab_user') as batch_op: # Drop the unique constraint on username and email constraints = get_mssql_table_constraints(conn, 'ab_user') for k, _ in constraints.get('UNIQUE').items(): batch_op.drop_constraint(k, type_='unique') batch_op.alter_column('username', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['username']) batch_op.alter_column('email', existing_type=sa.String(256), nullable=False) batch_op.create_unique_constraint(None, ['email']) ``` ###Assistant : Apply Update migration for FAB tables to add missing constraints
1,063
def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert. conn.execute( text(), { 'id': row['id'] } ) analysis_id = conn.execute(text()).fetchall() conn.execute( text(), { 'analysis_id': analysis_id[0][0], 'id': row['id'] } ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.drop_column('analysis') op.create_table( 'file', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('source_file_path', sa.String(), nullable=False), sa.Column('file_path', sa.String(), nullable=False), sa.Column('row_count', sa.Integer(), nullable=False), sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False), # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now() sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? 
erver_default=func.now() sa.Column('analysis_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) # delete ds where data is none dsatasources = conn.execute(text('select * from datasource')).fetchall() for ds in dsatasources: if ds['data'] is None: conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']}) continue ds_data = json.loads(ds['data']) creation_info = json.loads(ds['creation_info']) datasource_name = ds_data.get('source_type') if datasource_name == 'file': created_at = None if isinstance(ds['created_at'], str): created_at = datetime.datetime.fromisoformat(ds['created_at']) elif isinstance(ds['created_at'], [float, int]): created_at = datetime.fromtimestamp(ds['created_at']) updated_at = None if isinstance(ds['updated_at'], str): updated_at = datetime.datetime.fromisoformat(ds['updated_at']) elif isinstance(ds['updated_at'], [float, int]): updated_at = datetime.fromtimestamp(ds['updated_at']) file = mindsdb.interfaces.storage.db.File( name=ds['name'], company_id=ds['company_id'], source_file_path=ds_data['source'], file_path=creation_info['args'][0], row_count=ds_data['row_count'], columns=ds_data['columns'], created_at=created_at, updated_at=updated_at, analysis_id=ds['analysis_id'] ) session.add(file) conn.execute( text(), { 'datasource_name': datasource_name, 'company_id': ds['company_id'], 'ds_class': creation_info['class'], 'id': ds['id'] } ) session.commit() op.rename_table('datasource', 'dataset') with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id']) # NOTE two different 'batch' is necessary, in other way FK is not creating with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column('datasource_id', new_column_name='dataset_id') with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id']) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('integration', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id'])
insert into view_tmp (id, name, company_id, query, integration_id) select id, name, company_id, query, datasource_id from view; insert into analysis (analysis) select analysis from datasource where id = :id; select id from analysis order by id desc limit 1; update datasource set analysis_id = :analysis_id where id = :id update datasource set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id), ds_class = :ds_class where id = :id
72
386
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def upgrade(): op.drop_table('ai_table') conn = op.get_bind() # views was created with unnamed fk. Therefore need recreate it op.create_table( 'view_tmp', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('query', sa.String(), nullable=False), sa.Column('integration_id', sa.Integer(), nullable=False), sa.ForeignKeyConstraint(['integration_id'], ['integration.id'], name='fk_integration_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) conn.execute(text()) op.drop_table('view') op.rename_table('view_tmp', 'view') op.create_table( 'analysis', sa.Column('id', sa.Integer(), nullable=False), sa.Column('analysis', mindsdb.interfaces.storage.db.Json(), nullable=False), sa.Column('created_at', sa.DateTime(), nullable=True), sa.Column('updated_at', sa.DateTime(), nullable=True), sa.PrimaryKeyConstraint('id') ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.add_column(sa.Column('analysis_id', sa.Integer(), nullable=True)) batch_op.create_foreign_key('fk_analysis_id', 'analysis', ['analysis_id'], ['id']) batch_op.add_column(sa.Column('ds_class', sa.String(), nullable=True)) session = sa.orm.Session(bind=conn) dsatasources = conn.execute('select id, analysis from datasource').fetchall() for row in dsatasources: if row['analysis'] is not None: # NOTE 'returning' is relatively new in sqlite, so better will be use select after insert. conn.execute( text(), { 'id': row['id'] } ) analysis_id = conn.execute(text()).fetchall() conn.execute( text(), { 'analysis_id': analysis_id[0][0], 'id': row['id'] } ) with op.batch_alter_table('datasource', schema=None) as batch_op: batch_op.drop_column('analysis') op.create_table( 'file', sa.Column('id', sa.Integer(), nullable=False), sa.Column('name', sa.String(), nullable=False), sa.Column('company_id', sa.Integer(), nullable=True), sa.Column('source_file_path', sa.String(), nullable=False), sa.Column('file_path', sa.String(), nullable=False), sa.Column('row_count', sa.Integer(), nullable=False), sa.Column('columns', mindsdb.interfaces.storage.db.Json(), nullable=False), # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? erver_default=func.now() # sa.Column('created_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now), # ????? # sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=datetime.datetime.now, server_onupdate=datetime.datetime.now), # ????? erver_default=func.now() sa.Column('created_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp()), # ????? sa.Column('updated_at', sa.DateTime(), nullable=True, server_default=sa.func.current_timestamp(), server_onupdate=sa.func.current_timestamp()), # ????? 
erver_default=func.now() sa.Column('analysis_id', sa.Integer(), nullable=True), sa.ForeignKeyConstraint(['analysis_id'], ['analysis.id'], name='fk_analysis_id'), sa.PrimaryKeyConstraint('id'), sa.UniqueConstraint('name', 'company_id', name='unique_name_company_id') ) # delete ds where data is none dsatasources = conn.execute(text('select * from datasource')).fetchall() for ds in dsatasources: if ds['data'] is None: conn.execute(text('delete from datasource where id = :id'), {'id': ds['id']}) continue ds_data = json.loads(ds['data']) creation_info = json.loads(ds['creation_info']) datasource_name = ds_data.get('source_type') if datasource_name == 'file': created_at = None if isinstance(ds['created_at'], str): created_at = datetime.datetime.fromisoformat(ds['created_at']) elif isinstance(ds['created_at'], [float, int]): created_at = datetime.fromtimestamp(ds['created_at']) updated_at = None if isinstance(ds['updated_at'], str): updated_at = datetime.datetime.fromisoformat(ds['updated_at']) elif isinstance(ds['updated_at'], [float, int]): updated_at = datetime.fromtimestamp(ds['updated_at']) file = mindsdb.interfaces.storage.db.File( name=ds['name'], company_id=ds['company_id'], source_file_path=ds_data['source'], file_path=creation_info['args'][0], row_count=ds_data['row_count'], columns=ds_data['columns'], created_at=created_at, updated_at=updated_at, analysis_id=ds['analysis_id'] ) session.add(file) conn.execute( text(), { 'datasource_name': datasource_name, 'company_id': ds['company_id'], 'ds_class': creation_info['class'], 'id': ds['id'] } ) session.commit() op.rename_table('datasource', 'dataset') with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_foreign_key('fk_integration_id', 'integration', ['integration_id'], ['id']) # NOTE two different 'batch' is necessary, in other way FK is not creating with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.alter_column('datasource_id', new_column_name='dataset_id') with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_foreign_key('fk_dataset_id', 'dataset', ['dataset_id'], ['id']) with op.batch_alter_table('predictor', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('integration', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) with op.batch_alter_table('dataset', schema=None) as batch_op: batch_op.create_unique_constraint('unique_name_company_id', ['name', 'company_id']) ``` ###Assistant : insert into view_tmp (id, name, company_id, query, integration_id) select id, name, company_id, query, datasource_id from view; insert into analysis (analysis) select analysis from datasource where id = :id; select id from analysis order by id desc limit 1; update datasource set analysis_id = :analysis_id where id = :id update datasource set integration_id = (select id from integration where name = :datasource_name and company_id = :company_id), ds_class = :ds_class where id = :id
1,064
def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, "_X_mean"): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], sample_weight[~mask], cluster_node.right ) return labels
Predict recursively by going down the hierarchical tree. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The data points, currently assigned to `cluster_node`, to predict between the subclusters of this node. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_node : _BisectingTree node object The cluster node of the hierarchical tree. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.
74
95
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _predict_recursive(self, X, sample_weight, cluster_node): if cluster_node.left is None: # This cluster has no subcluster. Labels are just the label of the cluster. return np.full(X.shape[0], cluster_node.label, dtype=np.int32) # Determine if data points belong to the left or right subcluster centers = np.vstack((cluster_node.left.center, cluster_node.right.center)) if hasattr(self, "_X_mean"): centers += self._X_mean cluster_labels = _labels_inertia_threadpool_limit( X, sample_weight, centers, self._n_threads, return_inertia=False, ) mask = cluster_labels == 0 # Compute the labels for each subset of the data points. labels = np.full(X.shape[0], -1, dtype=np.int32) labels[mask] = self._predict_recursive( X[mask], sample_weight[mask], cluster_node.left ) labels[~mask] = self._predict_recursive( X[~mask], sample_weight[~mask], cluster_node.right ) return labels ``` ###Assistant : Predict recursively by going down the hierarchical tree. Parameters ---------- X : {ndarray, csr_matrix} of shape (n_samples, n_features) The data points, currently assigned to `cluster_node`, to predict between the subclusters of this node. sample_weight : ndarray of shape (n_samples,) The weights for each observation in X. cluster_node : _BisectingTree node object The cluster node of the hierarchical tree. Returns ------- labels : ndarray of shape (n_samples,) Index of the cluster each sample belongs to.
1,065
def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # compute spectrograms y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2) y_lengths = torch.tensor([y.size(-1)]).to(y.device) speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector # print(y.shape, y_lengths.shape) wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt) return wav
Inference for voice conversion Args: reference_wav (Tensor): Reference waveform. Tensor of shape [B, T] speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B] d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]` reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B] reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]`
61
54
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inference_voice_conversion(self, reference_wav, speaker_id=None, d_vector=None, reference_speaker_id=None, reference_d_vector=None): # compute spectrograms y = wav_to_spec(reference_wav, self.config.audio.fft_size, self.config.audio.hop_length, self.config.audio.win_length, center=False).transpose(1, 2) y_lengths = torch.tensor([y.size(-1)]).to(y.device) speaker_cond_src = reference_speaker_id if reference_speaker_id is not None else reference_d_vector speaker_cond_tgt = speaker_id if speaker_id is not None else d_vector # print(y.shape, y_lengths.shape) wav, _, _ = self.voice_conversion(y, y_lengths, speaker_cond_src, speaker_cond_tgt) return wav ``` ###Assistant : Inference for voice conversion Args: reference_wav (Tensor): Reference wavform. Tensor of shape [B, T] speaker_id (Tensor): speaker_id of the target speaker. Tensor of shape [B] d_vector (Tensor): d_vector embedding of target speaker. Tensor of shape `[B, C]` reference_speaker_id (Tensor): speaker_id of the reference_wav speaker. Tensor of shape [B] reference_d_vector (Tensor): d_vector embedding of the reference_wav speaker. Tensor of shape `[B, C]`
1,066
def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate') self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate')
Ensure 2 routers with the same model, and a distinct basename specified on each, do not throw an exception
19
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_nonconflicting_specified_basename(self): self.router.register(r'notes', NoteViewSet, basename='notes') self.router.register(r'notes_kwduplicate', KWargedNoteViewSet, basename='notes_kwduplicate') self.router.register(r'notes_duplicate', NoteViewSet, basename='notes_duplicate') ``` ###Assistant : Ensure 2 routers with the same model, and a distinct basename specified on each does not throw an exception
1,067
async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPEN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPEN # At least one member closed -> group closed for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, 
STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSED, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSED # All group members removed from the state machine -> unknown hass.states.async_remove(DEMO_COVER) hass.states.async_remove(DEMO_COVER_POS) hass.states.async_remove(DEMO_COVER_TILT) hass.states.async_remove(DEMO_TILT) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN @pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)])
Test handling of state. The group state is unknown if all group members are unknown or unavailable. Otherwise, the group state is opening if at least one group member is opening. Otherwise, the group state is closing if at least one group member is closing. Otherwise, the group state is open if at least one group member is open. Otherwise, the group state is closed.
65
389
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_state(hass, setup_comp): state = hass.states.get(COVER_GROUP) # No entity has a valid state -> group state unknown assert state.state == STATE_UNKNOWN assert state.attributes[ATTR_FRIENDLY_NAME] == DEFAULT_NAME assert state.attributes[ATTR_ENTITY_ID] == [ DEMO_COVER, DEMO_COVER_POS, DEMO_COVER_TILT, DEMO_TILT, ] assert ATTR_ASSUMED_STATE not in state.attributes assert state.attributes[ATTR_SUPPORTED_FEATURES] == 0 assert ATTR_CURRENT_POSITION not in state.attributes assert ATTR_CURRENT_TILT_POSITION not in state.attributes # The group state is unknown if all group members are unknown or unavailable. for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNAVAILABLE, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN for state_1 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_UNKNOWN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN # At least one member opening -> group opening for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_OPENING, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPENING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_OPENING # At least one member closing -> group closing for state_1 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_2 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): for state_3 in ( STATE_CLOSED, STATE_CLOSING, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN, ): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSING, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSING # At least one member open -> group open for state_1 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_OPEN, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_OPEN, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == 
STATE_OPEN # At least one member closed -> group closed for state_1 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_2 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): for state_3 in (STATE_CLOSED, STATE_UNAVAILABLE, STATE_UNKNOWN): hass.states.async_set(DEMO_COVER, state_1, {}) hass.states.async_set(DEMO_COVER_POS, state_2, {}) hass.states.async_set(DEMO_COVER_TILT, state_3, {}) hass.states.async_set(DEMO_TILT, STATE_CLOSED, {}) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_CLOSED # All group members removed from the state machine -> unknown hass.states.async_remove(DEMO_COVER) hass.states.async_remove(DEMO_COVER_POS) hass.states.async_remove(DEMO_COVER_TILT) hass.states.async_remove(DEMO_TILT) await hass.async_block_till_done() state = hass.states.get(COVER_GROUP) assert state.state == STATE_UNKNOWN @pytest.mark.parametrize("config_count", [(CONFIG_ATTRIBUTES, 1)]) ``` ###Assistant : Test handling of state. The group state is unknown if all group members are unknown or unavailable. Otherwise, the group state is opening if at least one group member is opening. Otherwise, the group state is closing if at least one group member is closing. Otherwise, the group state is open if at least one group member is open. Otherwise, the group state is closed.
1,068
def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy) if proxy in self._func_cid_map[signal]: return self._func_cid_map[signal][proxy] cid = next(self._cid_gen) self._func_cid_map[signal][proxy] = cid self.callbacks.setdefault(signal, {}) self.callbacks[signal][cid] = proxy return cid
Register *func* to be called when signal *signal* is generated.
10
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def connect(self, signal, func): if self._signals is not None: _api.check_in_list(self._signals, signal=signal) self._func_cid_map.setdefault(signal, {}) proxy = _weak_or_strong_ref(func, self._remove_proxy) if proxy in self._func_cid_map[signal]: return self._func_cid_map[signal][proxy] cid = next(self._cid_gen) self._func_cid_map[signal][proxy] = cid self.callbacks.setdefault(signal, {}) self.callbacks[signal][cid] = proxy return cid ``` ###Assistant : Register *func* to be called when signal *signal* is generated.
1,069
def get_or_create_account(company_name, account): default_root_type = "Liability" root_type = account.get("root_type", default_root_type) existing_accounts = frappe.get_all( "Account", filters={"company": company_name, "root_type": root_type}, or_filters={ "account_name": account.get("account_name"), "account_number": account.get("account_number"), }, ) if existing_accounts: return frappe.get_doc("Account", existing_accounts[0].name) tax_group = get_or_create_tax_group(company_name, root_type) account["doctype"] = "Account" account["company"] = company_name account["parent_account"] = tax_group account["report_type"] = "Balance Sheet" account["account_type"] = "Tax" account["root_type"] = root_type account["is_group"] = 0 doc = frappe.get_doc(account) doc.flags.ignore_links = True doc.flags.ignore_validate = True doc.insert(ignore_permissions=True, ignore_mandatory=True) return doc
Check if account already exists. If not, create it. Return a tax account or None.
15
69
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_or_create_account(company_name, account): default_root_type = "Liability" root_type = account.get("root_type", default_root_type) existing_accounts = frappe.get_all( "Account", filters={"company": company_name, "root_type": root_type}, or_filters={ "account_name": account.get("account_name"), "account_number": account.get("account_number"), }, ) if existing_accounts: return frappe.get_doc("Account", existing_accounts[0].name) tax_group = get_or_create_tax_group(company_name, root_type) account["doctype"] = "Account" account["company"] = company_name account["parent_account"] = tax_group account["report_type"] = "Balance Sheet" account["account_type"] = "Tax" account["root_type"] = root_type account["is_group"] = 0 doc = frappe.get_doc(account) doc.flags.ignore_links = True doc.flags.ignore_validate = True doc.insert(ignore_permissions=True, ignore_mandatory=True) return doc ``` ###Assistant : Check if account already exists. If not, create it. Return a tax account or None.
1,070
def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self._gui_objects.default_options = default self.project.set_default_options()
Set the default options for :mod:`lib.gui.projects`. The default GUI options are stored on Faceswap startup, exposed as :attr:`_default_opts` because a project's defaults cannot be set until after the main Command Tabs have been loaded.
34
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_default_options(self) -> None: default = self.cli_opts.get_option_values() logger.debug(default) self._gui_objects.default_options = default self.project.set_default_options() ``` ###Assistant : Set the default options for :mod:`lib.gui.projects` The Default GUI options are stored on Faceswap startup. Exposed as the :attr:`_default_opts` for a project cannot be set until after the main Command Tabs have been loaded.
1,071
def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = '<span style="%s"%s>' % (css_style, title) else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = '<span class="%s"%s>' % (css_class, title) else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, part, (cspan and '</span>'), lsep)) else: # both are the same line.extend((part, (lspan and '</span>'), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and '</span>'), lsep)) yield 1, ''.join(line)
Just format the tokens, without any wrapping tags. Yield individual lines.
11
244
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _format_lines(self, tokensource): nocls = self.noclasses lsep = self.lineseparator tagsfile = self.tagsfile lspan = '' line = [] for ttype, value in tokensource: try: cspan = self.span_element_openers[ttype] except KeyError: title = ' title="%s"' % '.'.join(ttype) if self.debug_token_types else '' if nocls: css_style = self._get_css_inline_styles(ttype) if css_style: css_style = self.class2style[css_style][0] cspan = '<span style="%s"%s>' % (css_style, title) else: cspan = '' else: css_class = self._get_css_classes(ttype) if css_class: cspan = '<span class="%s"%s>' % (css_class, title) else: cspan = '' self.span_element_openers[ttype] = cspan parts = self._translate_parts(value) if tagsfile and ttype in Token.Name: filename, linenumber = self._lookup_ctag(value) if linenumber: base, filename = os.path.split(filename) if base: base += '/' filename, extension = os.path.splitext(filename) url = self.tagurlformat % {'path': base, 'fname': filename, 'fext': extension} parts[0] = "<a href=\"%s#%s-%d\">%s" % \ (url, self.lineanchors, linenumber, parts[0]) parts[-1] = parts[-1] + "</a>" # for all but the last line for part in parts[:-1]: if line: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, part, (cspan and '</span>'), lsep)) else: # both are the same line.extend((part, (lspan and '</span>'), lsep)) yield 1, ''.join(line) line = [] elif part: yield 1, ''.join((cspan, part, (cspan and '</span>'), lsep)) else: yield 1, lsep # for the last line if line and parts[-1]: if lspan != cspan: line.extend(((lspan and '</span>'), cspan, parts[-1])) lspan = cspan else: line.append(parts[-1]) elif parts[-1]: line = [cspan, parts[-1]] lspan = cspan # else we neither have to open a new span nor set lspan if line: line.extend(((lspan and '</span>'), lsep)) yield 1, ''.join(line) ``` ###Assistant : Just format the tokens, without any wrapping tags. Yield individual lines.
1,072
def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f"{self.resource_type}s", []) if len(search_results) > 1: raise DuplicateResourceError("Two or more ressources exist with the same name.") if len(search_results) == 1: return search_results[0] else: return None
Find the remote resource on the Airbyte instance associated with the current resource. Raises: DuplicateResourceError: raised if the search results return multiple resources. Returns: Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found.
31
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_remote_resource(self) -> Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: search_results = self._search().get(f"{self.resource_type}s", []) if len(search_results) > 1: raise DuplicateResourceError("Two or more ressources exist with the same name.") if len(search_results) == 1: return search_results[0] else: return None ``` ###Assistant : Find the remote resource on the Airbyte instance associated with the current resource. Raises: DuplicateResourceError: raised if the search results return multiple resources. Returns: Optional[Union[SourceRead, DestinationRead, ConnectionRead]]: The remote resource found.
1,073
def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { "optimizer": self.optimizer, "loss": self.compiled_loss._user_losses, "metrics": saved_metrics, "weighted_metrics": saved_weighted_metrics, "loss_weights": self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args
Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model.
34
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_compile_args(self, user_metrics=True): self._assert_compile_was_called() # pylint: disable=protected-access saved_metrics = self.compiled_metrics._user_metrics saved_weighted_metrics = self.compiled_metrics._user_weighted_metrics if not user_metrics: if saved_metrics is not None: saved_metrics = self.compiled_metrics._metrics if saved_weighted_metrics is not None: saved_weighted_metrics = self.compiled_metrics._weighted_metrics compile_args = { "optimizer": self.optimizer, "loss": self.compiled_loss._user_losses, "metrics": saved_metrics, "weighted_metrics": saved_weighted_metrics, "loss_weights": self.compiled_loss._user_loss_weights, } # pylint: enable=protected-access return compile_args ``` ###Assistant : Used for saving or cloning a Model. Args: user_metrics: Whether to return user-supplied metrics or `Metric` objects. Defaults to returning the user-supplied metrics. Returns: Dictionary of arguments that were used when compiling the model.
1,074
def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "ReST Documentation is saved to file" not in out
Ensures that ReST documentation is not written CLI :: --no-rest-doc
10
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_norestexdoc(capfd, hello_world_f90, monkeypatch): ipath = Path(hello_world_f90) mname = "blah" monkeypatch.setattr(sys, "argv", f'f2py -m {mname} {ipath} --no-rest-doc'.split()) with util.switchdir(ipath.parent): f2pycli() out, _ = capfd.readouterr() assert "ReST Documentation is saved to file" not in out ``` ###Assistant : Ensures that ReST documentation is not written CLI :: --no-rest-doc
1,075
def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "Length of the specified weight list (" + str(len(weights)) + ") does not match the number of weights " "of the optimizer (" + str(len(params)) + ")" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( "Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples)
Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). Raises: ValueError: in case of incompatible weight shapes.
65
82
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_weights(self, weights): params = self.weights if len(params) != len(weights): raise ValueError( "Length of the specified weight list (" + str(len(weights)) + ") does not match the number of weights " "of the optimizer (" + str(len(params)) + ")" ) weight_value_tuples = [] param_values = backend.batch_get_value(params) for pv, p, w in zip(param_values, params, weights): if pv.shape != w.shape: raise ValueError( "Optimizer weight shape " + str(pv.shape) + " not compatible with " "provided weight shape " + str(w.shape) ) weight_value_tuples.append((p, w)) backend.batch_set_value(weight_value_tuples) ``` ###Assistant : Sets the weights of the optimizer, from Numpy arrays. Should only be called after computing the gradients (otherwise the optimizer has no weights). Args: weights: a list of Numpy arrays. The number of arrays and their shape must match number of the dimensions of the weights of the optimizer (i.e. it should match the output of `get_weights`). Raises: ValueError: in case of incompatible weight shapes.
1,076
def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight
Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
29
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extract_tensors_from_dataset(dataset): iterator = get_iterator(dataset) inputs, targets, sample_weight = unpack_iterator_input(iterator) return inputs, targets, sample_weight ``` ###Assistant : Extract a tuple of tensors `inputs, targets, sample_weight` from a dataset. Args: dataset: Dataset instance. Returns: Tuple of tensors `x, y, weights`. `y` and `weights` entry may be None.
1,077
def _set_autocommit(self, autocommit): raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a _set_autocommit() method" ) # ##### Generic transaction management methods #####
Backend-specific implementation to enable or disable autocommit.
7
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_autocommit(self, autocommit): raise NotImplementedError( "subclasses of BaseDatabaseWrapper may require a _set_autocommit() method" ) # ##### Generic transaction management methods ##### ``` ###Assistant : Backend-specific implementation to enable or disable autocommit.
1,078
def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, "edit_action": reverse( self.edit_upload_url_name, args=(self.upload_object.id,) ), "delete_action": reverse( self.delete_upload_url_name, args=(self.upload_object.id,) ), "form": edit_form_class( instance=self.object, prefix="%s-%d" % (self.edit_upload_form_prefix, self.upload_object.id), user=self.request.user, ), }
Return the context data necessary for rendering the HTML form for supplying the metadata to turn an upload object into a final object
23
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_edit_upload_form_context_data(self): edit_form_class = self.get_edit_form_class() return { self.context_upload_name: self.upload_object, "edit_action": reverse( self.edit_upload_url_name, args=(self.upload_object.id,) ), "delete_action": reverse( self.delete_upload_url_name, args=(self.upload_object.id,) ), "form": edit_form_class( instance=self.object, prefix="%s-%d" % (self.edit_upload_form_prefix, self.upload_object.id), user=self.request.user, ), } ``` ###Assistant : Return the context data necessary for rendering the HTML form for supplying the metadata to turn an upload object into a final object
1,079
def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse("admin7:index") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.viewuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.adduser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.changeuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.deleteuser) response = self.client.get(index_url) self.assertNotContains(response, articles) # The app list displays Sections but not Articles as the latter has # ModelAdmin.has_module_permission() = False. self.client.force_login(self.superuser) response = self.client.get(reverse("admin7:app_list", args=("admin_views",))) self.assertContains(response, sections) self.assertNotContains(response, articles)
If has_module_permission() always returns False, the module shouldn't be displayed on the admin index page for any users.
18
79
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_overriding_has_module_permission(self): articles = Article._meta.verbose_name_plural.title() sections = Section._meta.verbose_name_plural.title() index_url = reverse("admin7:index") self.client.force_login(self.superuser) response = self.client.get(index_url) self.assertContains(response, sections) self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.viewuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.adduser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.changeuser) response = self.client.get(index_url) self.assertNotContains(response, "admin_views") self.assertNotContains(response, articles) self.client.logout() self.client.force_login(self.deleteuser) response = self.client.get(index_url) self.assertNotContains(response, articles) # The app list displays Sections but not Articles as the latter has # ModelAdmin.has_module_permission() = False. self.client.force_login(self.superuser) response = self.client.get(reverse("admin7:app_list", args=("admin_views",))) self.assertContains(response, sections) self.assertNotContains(response, articles) ``` ###Assistant : If has_module_permission() always returns False, the module shouldn't be displayed on the admin index page for any users.
1,080
def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class="nonfield", renderer=self.renderer), )
Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none.
24
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def non_field_errors(self): return self.errors.get( NON_FIELD_ERRORS, self.error_class(error_class="nonfield", renderer=self.renderer), ) ``` ###Assistant : Return an ErrorList of errors that aren't associated with a particular field -- i.e., from Form.clean(). Return an empty ErrorList if there are none.
1,081
def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, VanModel): module.gradient_checkpointing = value VAN_START_DOCSTRING = r VAN_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.", VAN_START_DOCSTRING, )
This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VanConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
128
40
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _set_gradient_checkpointing(self, module, value=False): if isinstance(module, VanModel): module.gradient_checkpointing = value VAN_START_DOCSTRING = r VAN_INPUTS_DOCSTRING = r @add_start_docstrings( "The bare VAN model outputting raw features without any specific head on top. Note, VAN does not have an embedding layer.", VAN_START_DOCSTRING, ) ``` ###Assistant : This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) subclass. Use it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and behavior. Parameters: config ([`VanConfig`]): Model configuration class with all the parameters of the model. Initializing with a config file does not load the weights associated with the model, only the configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights. Args: pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`): Pixel values. Pixel values can be obtained using [`AutoFeatureExtractor`]. See [`AutoFeatureExtractor.__call__`] for details. output_hidden_states (`bool`, *optional*): Whether or not to return the hidden states of all stages. See `hidden_states` under returned tensors for more detail. return_dict (`bool`, *optional*): Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple.
1,082
def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError
Update an already-existing repo to the given ``rev_options``. Args: rev_options: a RevOptions object.
13
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def update(self, dest, url, rev_options): # type: (str, HiddenText, RevOptions) -> None raise NotImplementedError ``` ###Assistant : Update an already-existing repo to the given ``rev_options``. Args: rev_options: a RevOptions object.
1,083
def get_nested_field(value, field): if field == '__self__': return value fields = field.split('__') for fld in fields: if isinstance(value, list): value = [getattr(v, fld) for v in value] else: value = getattr(value, fld) return value
Get nested field from list of objects or single instance :param value: Single instance or list to look up field :param field: Field to lookup :return: List or single instance of looked up field
34
34
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_nested_field(value, field): if field == '__self__': return value fields = field.split('__') for fld in fields: if isinstance(value, list): value = [getattr(v, fld) for v in value] else: value = getattr(value, fld) return value ``` ###Assistant : Get nested field from list of objects or single instance :param value: Single instance or list to look up field :param field: Field to lookup :return: List or single instance of looked up field
1,084
def test_stylesheet_many_classes_dont_overrule_id(): css = "#id {color: red;} .a.b.c.d {color: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b c d", id="id") stylesheet.apply(node) assert node.styles.color == Color(255, 0, 0)
#id is further to the left in the specificity tuple than class, and a selector containing multiple classes cannot take priority over even a single ID.
26
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_stylesheet_many_classes_dont_overrule_id(): css = "#id {color: red;} .a.b.c.d {color: blue;}" stylesheet = _make_stylesheet(css) node = DOMNode(classes="a b c d", id="id") stylesheet.apply(node) assert node.styles.color == Color(255, 0, 0) ``` ###Assistant : #id is further to the left in the specificity tuple than class, and a selector containing multiple classes cannot take priority over even a single ID.
1,085
def site_data_dir(self) -> str: return self._append_app_name_and_version("/Library/Application Support")
:return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``
9
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def site_data_dir(self) -> str: return self._append_app_name_and_version("/Library/Application Support") ``` ###Assistant : :return: data directory shared by users, e.g. ``/Library/Application Support/$appname/$version``
1,086
def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop == first_pop
Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.
15
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_warm_start(): tpot_obj = TPOTClassifier( random_state=42, population_size=1, offspring_size=2, generations=1, verbosity=0, config_dict='TPOT light', warm_start=True) tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop is not None assert tpot_obj._pareto_front is not None first_pop = tpot_obj._pop tpot_obj.random_state = 21 tpot_obj.fit(pretest_X, pretest_y) assert tpot_obj._pop == first_pop ``` ###Assistant : Assert that the TPOT warm_start flag stores the pop and pareto_front from the first run.
1,087
def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) return train_iter, train_set.vocab
Load the WikiText-2 dataset. Defined in :numref:`subsec_prepare_mlm_data`
7
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def load_data_wiki(batch_size, max_len): num_workers = d2l.get_dataloader_workers() data_dir = d2l.download_extract('wikitext-2', 'wikitext-2') paragraphs = _read_wiki(data_dir) train_set = _WikiTextDataset(paragraphs, max_len) train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True, num_workers=num_workers) return train_iter, train_set.vocab ``` ###Assistant : Load the WikiText-2 dataset. Defined in :numref:`subsec_prepare_mlm_data`
1,088
def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self
Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`.
43
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def save(self, loc, **kwargs) -> Plot: # TODO expose important keyword arguments in our signature? with theme_context(self._theme_with_defaults()): self._plot().save(loc, **kwargs) return self ``` ###Assistant : Compile the plot and write it to a buffer or file on disk. Parameters ---------- loc : str, path, or buffer Location on disk to save the figure, or a buffer to write into. kwargs Other keyword arguments are passed through to :meth:`matplotlib.figure.Figure.savefig`.
1,089
def test_create_realm_no_creation_key(self) -> None: email = "[email protected]" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key. result = self.client_post("/new/", {"email": email}) self.assertEqual(result.status_code, 200) self.assert_in_response("Organization creation link required", result)
Trying to create a realm without a creation_key should fail when OPEN_REALM_CREATION is false.
14
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_create_realm_no_creation_key(self) -> None: email = "[email protected]" with self.settings(OPEN_REALM_CREATION=False): # Create new realm with the email, but no creation key. result = self.client_post("/new/", {"email": email}) self.assertEqual(result.status_code, 200) self.assert_in_response("Organization creation link required", result) ``` ###Assistant : Trying to create a realm without a creation_key should fail when OPEN_REALM_CREATION is false.
1,090
def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8") for handle in self._handles] self._log("debug", f"GPU Devices: {names}") return names
Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`. Returns ------- list The list of connected Nvidia GPU names
23
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_device_names(self) -> List[str]: names = [pynvml.nvmlDeviceGetName(handle).decode("utf-8") for handle in self._handles] self._log("debug", f"GPU Devices: {names}") return names ``` ###Assistant : Obtain the list of names of connected Nvidia GPUs as identified in :attr:`_handles`. Returns ------- list The list of connected Nvidia GPU names
1,091
def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message that references the event before the join to # simulate a "delayed" event self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1]) # user2 should not be notified about it, because they can't see it. self.assertEqual(self.get_notif_count(self.user_id2), 0)
Test that a delayed message that was from before a user joined doesn't cause a notification for the joined user.
20
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_delayed_message(self) -> None: user1 = UserID.from_string(self.user_id1) # Send a message before user2 joins event_id1 = self.create_and_send_event(self.room_id, user1) # Have user2 join the room self.helper.join(self.room_id, self.user_id2, tok=self.tok2) # They start off with no notifications self.assertEqual(self.get_notif_count(self.user_id2), 0) # Send another message that references the event before the join to # simulate a "delayed" event self.create_and_send_event(self.room_id, user1, prev_event_ids=[event_id1]) # user2 should not be notified about it, because they can't see it. self.assertEqual(self.get_notif_count(self.user_id2), 0) ``` ###Assistant : Test that a delayed message that was from before a user joined doesn't cause a notification for the joined user.
1,092
def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings("ignore", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: model_intercept = model.intercept_ model_coef = model.coef_ rtol = 6e-5 if n_samples > n_features: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # Same as in test_glm_regression_unpenalized. # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm( 0.5 * np.r_[intercept, intercept, coef, coef] ) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False])
Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. GLM fit on [X] is the same as fit on [X, X]/2. For long X, [X, X] is a singular matrix and we check against the minimum norm solution: min ||w||_2 subject to w = argmin deviance(X, y, w)
61
314
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_glm_regression_unpenalized_hstacked_X(solver, fit_intercept, glm_dataset): model, X, y, coef, _, _, _ = glm_dataset n_samples, n_features = X.shape alpha = 0 # unpenalized params = dict( alpha=alpha, fit_intercept=fit_intercept, # solver=solver, # only lbfgs available tol=1e-12, max_iter=1000, ) model = clone(model).set_params(**params) if fit_intercept: intercept = coef[-1] coef = coef[:-1] if n_samples > n_features: X = X[:, :-1] # remove intercept X = 0.5 * np.concatenate((X, X), axis=1) else: # To know the minimum norm solution, we keep one intercept column and do # not divide by 2. Later on, we must take special care. X = np.c_[X[:, :-1], X[:, :-1], X[:, -1]] else: intercept = 0 X = 0.5 * np.concatenate((X, X), axis=1) assert np.linalg.matrix_rank(X) <= min(n_samples, n_features) with warnings.catch_warnings(): if fit_intercept and n_samples <= n_features: # XXX: Investigate if the lack of convergence in this case should be # considered a bug or not. warnings.filterwarnings("ignore", category=ConvergenceWarning) model.fit(X, y) if fit_intercept and n_samples <= n_features: # Here we take special care. model_intercept = 2 * model.intercept_ model_coef = 2 * model.coef_[:-1] # exclude the other intercept term. # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: model_intercept = model.intercept_ model_coef = model.coef_ rtol = 6e-5 if n_samples > n_features: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=1e-4) else: # As it is an underdetermined problem, prediction = y. The following shows that # we get a solution, i.e. a (non-unique) minimum of the objective function ... assert_allclose(model.predict(X), y, rtol=1e-6) if fit_intercept: # Same as in test_glm_regression_unpenalized. # But it is not the minimum norm solution. Otherwise the norms would be # equal. norm_solution = np.linalg.norm( 0.5 * np.r_[intercept, intercept, coef, coef] ) norm_model = np.linalg.norm(np.r_[model.intercept_, model.coef_]) assert norm_model > (1 + 1e-12) * norm_solution # For minimum norm solution, we would have # assert model.intercept_ == pytest.approx(model.coef_[-1]) else: assert model_intercept == pytest.approx(intercept) assert_allclose(model_coef, np.r_[coef, coef], rtol=rtol) @pytest.mark.parametrize("solver", SOLVERS) @pytest.mark.parametrize("fit_intercept", [True, False]) ``` ###Assistant : Test that unpenalized GLM converges for all solvers to correct solution. We work with a simple constructed data set with known solution. GLM fit on [X] is the same as fit on [X, X]/2. For long X, [X, X] is a singular matrix and we check against the minimum norm solution: min ||w||_2 subject to w = argmin deviance(X, y, w)
1,093
def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str last_event_id: str
This is essentially the same test as test_count_aggregation, but adds events to the main timeline and to a thread.
19
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_count_aggregation_threads(self) -> None: user_id, token, _, other_token, room_id = self._create_users_and_room() thread_id: str last_event_id: str ``` ###Assistant : This is essentially the same test as test_count_aggregation, but adds events to the main timeline and to a thread.
1,094
def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) config = {"caches": {"global_factor": 4}} self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() self.assertEqual(cache.max_size, 400)
If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new default cache size once the config is loaded.
35
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_global_instantiated_before_config_load(self): cache = LruCache(100) add_resizable_cache("foo", cache_resize_callback=cache.set_cache_factor) self.assertEqual(cache.max_size, 50) config = {"caches": {"global_factor": 4}} self.config.read_config(config, config_dir_path="", data_dir_path="") self.config.resize_all_caches() self.assertEqual(cache.max_size, 400) ``` ###Assistant : If a cache is instantiated before the config is read, it will be given the default cache size in the interim, and then resized to the new default cache size once the config is loaded.
1,095
def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS
Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`.
64
10
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def in_top_k(predictions, targets, k): return tf.compat.v1.math.in_top_k(predictions, targets, k) # CONVOLUTIONS ``` ###Assistant : Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`.
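A small usage sketch of the wrapped op (made-up scores and targets, not taken from the Keras sources), illustrating the top-k membership check the docstring describes:

```Python
import tensorflow as tf

# Hypothetical scores for a batch of 2 examples over 4 classes.
predictions = tf.constant([[0.1, 0.3, 0.2, 0.4],
                           [0.5, 0.1, 0.3, 0.1]])
targets = tf.constant([1, 2])  # true class index for each example

# Is each true class among the 2 highest-scoring classes?
result = tf.compat.v1.math.in_top_k(predictions, targets, k=2)
print(result.numpy())  # [ True  True]
```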
1,096
def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: job.update_job.assert_called_once_with(batch=batch)
Checks job statuses in advance and restarts any that failed.

10
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_update_job(self, parent_job, grouped_jobs, api, batch): parent_job.update_job() # assert for job in grouped_jobs: job.update_job.assert_called_once_with(batch=batch) ``` ###Assistant : Checks job statuses in advance and restarts any that failed.
1,097
def list_master_symlinks(saltenv=None, prefix=""): if not saltenv: saltenv = __opts__["saltenv"] or "base" return _client().symlink_list(saltenv, prefix)
.. versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks
30
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def list_master_symlinks(saltenv=None, prefix=""): if not saltenv: saltenv = __opts__["saltenv"] or "base" return _client().symlink_list(saltenv, prefix) ``` ###Assistant : .. versionchanged:: 3005 ``saltenv`` will use value from config if not explicitly set List all of the symlinks stored on the master CLI Example: .. code-block:: bash salt '*' cp.list_master_symlinks
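A tiny standalone Python sketch (with a hypothetical `__opts__` dict rather than a real Salt minion config) of the saltenv fallback that the versionchanged note describes:

```Python
# Hypothetical minion config; on a real minion, Salt injects __opts__.
__opts__ = {"saltenv": None}

def _resolve_saltenv(saltenv=None):
    # Explicit argument wins, then the configured saltenv, then "base".
    if not saltenv:
        saltenv = __opts__["saltenv"] or "base"
    return saltenv

assert _resolve_saltenv() == "base"
assert _resolve_saltenv("dev") == "dev"
```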
1,098
def print_help(self): has_screen_tickers_start = "" if self.screen_tickers else "[unvl]" has_screen_tickers_end = "" if self.screen_tickers else "[/unvl]" help_text = f console.print(text=help_text, menu="Stocks - Options - Screener")
Print help[cmds] view view available presets (or one in particular) set set one of the available presets [/cmds] [param]PRESET: [/param]{self.preset}[cmds] scr screen data from this preset[/cmds] {has_screen_tickers_start} [param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu] > ca take these to comparison analysis menu > po take these to portfolio optimization menu{has_screen_tickers_end}
48
25
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def print_help(self): has_screen_tickers_start = "" if self.screen_tickers else "[unvl]" has_screen_tickers_end = "" if self.screen_tickers else "[/unvl]" help_text = f console.print(text=help_text, menu="Stocks - Options - Screener") ``` ###Assistant : Print help[cmds] view view available presets (or one in particular) set set one of the available presets [/cmds] [param]PRESET: [/param]{self.preset}[cmds] scr screen data from this preset[/cmds] {has_screen_tickers_start} [param]Last screened tickers: [/param]{', '.join(self.screen_tickers)}[menu] > ca take these to comparison analysis menu > po take these to portfolio optimization menu{has_screen_tickers_end}
1,099
def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (rawname %s)" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (valname %s)" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen)
Find tests for the given object and any contained objects, and add them to ``tests``.
15
358
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _find(self, tests, obj, name, module, source_lines, globs, seen): if self._verbose: print('Finding tests in %s' % name) # If we've already processed this object, then ignore it. if id(obj) in seen: return seen[id(obj)] = 1 # Make sure we don't run doctests for classes outside of sympy, such # as in numpy or scipy. if inspect.isclass(obj): if obj.__module__.split('.')[0] != 'sympy': return # Find a test for this object, and add it to the list of tests. test = self._get_test(obj, name, module, globs, source_lines) if test is not None: tests.append(test) if not self._recurse: return # Look for tests in a module's contained objects. if inspect.ismodule(obj): for rawname, val in obj.__dict__.items(): # Recurse to functions & classes. if inspect.isfunction(val) or inspect.isclass(val): # Make sure we don't run doctests functions or classes # from different modules if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (rawname %s)" % (val, module, rawname) try: valname = '%s.%s' % (name, rawname) self._find(tests, val, valname, module, source_lines, globs, seen) except KeyboardInterrupt: raise # Look for tests in a module's __test__ dictionary. for valname, val in getattr(obj, '__test__', {}).items(): if not isinstance(valname, str): raise ValueError("SymPyDocTestFinder.find: __test__ keys " "must be strings: %r" % (type(valname),)) if not (inspect.isfunction(val) or inspect.isclass(val) or inspect.ismethod(val) or inspect.ismodule(val) or isinstance(val, str)): raise ValueError("SymPyDocTestFinder.find: __test__ values " "must be strings, functions, methods, " "classes, or modules: %r" % (type(val),)) valname = '%s.__test__.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) # Look for tests in a class's contained objects. if inspect.isclass(obj): for valname, val in obj.__dict__.items(): # Special handling for staticmethod/classmethod. if isinstance(val, staticmethod): val = getattr(obj, valname) if isinstance(val, classmethod): val = getattr(obj, valname).__func__ # Recurse to methods, properties, and nested classes. if ((inspect.isfunction(unwrap(val)) or inspect.isclass(val) or isinstance(val, property)) and self._from_module(module, val)): # Make sure we don't run doctests functions or classes # from different modules if isinstance(val, property): if hasattr(val.fget, '__module__'): if val.fget.__module__ != module.__name__: continue else: if val.__module__ != module.__name__: continue assert self._from_module(module, val), \ "%s is not in module %s (valname %s)" % ( val, module, valname) valname = '%s.%s' % (name, valname) self._find(tests, val, valname, module, source_lines, globs, seen) ``` ###Assistant : Find tests for the given object and any contained objects, and add them to ``tests``.
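For context, a minimal sketch of the stdlib `doctest.DocTestFinder` that this class extends (a toy function, not SymPy code), showing the find-then-run flow that the overridden `_find` feeds into:

```Python
import doctest

def add(a, b):
    """Add two numbers.

    >>> add(2, 3)
    5
    """
    return a + b

# Collect the doctests attached to the object, then run them.
finder = doctest.DocTestFinder()
tests = finder.find(add)
print([t.name for t in tests])  # e.g. ['add']

runner = doctest.DocTestRunner()
for test in tests:
    runner.run(test)
print(runner.summarize())  # TestResults(attempted=1, failed=0)
```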