Unnamed: 0 (int64, 0–2.93k) | code (string, 101–62.2k chars) | docs (string, 51–10.7k chars) | doc_len (int64, 4–1.74k) | words (int64, 4–4.82k) | lang (string, 1 class) | prompt (string, 320–71.2k chars)
---|---|---|---|---|---|---|
1,100 | def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:
cache_key_results: MutableMapping[str, int] = {}
for org_id, result_dict in self.results.items():
for string, id in result_dict.items():
key = f"{org_id}:{string}"
if id is not None:
cache_key_results[key] = id
return cache_key_results
|
Return the results, but formatted as the following:
{
"1:a": 10,
"1:b": 11,
"1:c", 12,
"2:e": 13
}
This is for when we use indexer_cache.set_many()
| 25 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_mapped_key_strings_to_ints(self) -> MutableMapping[str, int]:
cache_key_results: MutableMapping[str, int] = {}
for org_id, result_dict in self.results.items():
for string, id in result_dict.items():
key = f"{org_id}:{string}"
if id is not None:
cache_key_results[key] = id
return cache_key_results
```
###Assistant :
Return the results, but formatted as the following:
{
"1:a": 10,
"1:b": 11,
"1:c", 12,
"2:e": 13
}
This is for when we use indexer_cache.set_many()
|
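A small illustrative sketch of the flattening performed by the method above; the `results` dict is invented for the example, and entries whose id is `None` are skipped:
```Python
# Illustrative sketch of the org_id:string -> id flattening described above.
# The `results` dict is invented for the example; None ids are skipped.
results = {1: {"a": 10, "b": 11, "c": 12}, 2: {"e": 13, "f": None}}

cache_key_results = {}
for org_id, result_dict in results.items():
    for string, id_ in result_dict.items():
        if id_ is not None:
            cache_key_results[f"{org_id}:{string}"] = id_

print(cache_key_results)
# {'1:a': 10, '1:b': 11, '1:c': 12, '2:e': 13}
```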
1,101 | def get_atext(value):
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
| atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
| 24 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_atext(value):
m = _non_atom_end_matcher(value)
if not m:
raise errors.HeaderParseError(
"expected atext but found '{}'".format(value))
atext = m.group()
value = value[len(atext):]
atext = ValueTerminal(atext, 'atext')
_validate_xtext(atext)
return atext, value
```
###Assistant : atext = <matches _atext_matcher>
We allow any non-ATOM_ENDS in atext, but add an InvalidATextDefect to
the token's defects list if we find non-atext characters.
|
1,102 | def _get_linenumber_pos(self, lineno):
return (self.image_pad, self._get_line_y(lineno))
|
Get the actual position for the start of a line number.
| 11 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_linenumber_pos(self, lineno):
return (self.image_pad, self._get_line_y(lineno))
```
###Assistant :
Get the actual position for the start of a line number.
|
1,103 | def test_image_inside_paragraph(self):
# In Draftail's data model, images are block-level elements and therefore
# split up preceding / following text into their own paragraphs
converter = ContentstateConverter(features=["image"])
result = json.loads(
converter.from_database_format(
)
)
self.assertContentStateEqual(
result,
{
"blocks": [
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [],
"depth": 0,
"text": "before",
"type": "unstyled",
},
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [{"key": 0, "offset": 0, "length": 1}],
"depth": 0,
"text": " ",
"type": "atomic",
},
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [],
"depth": 0,
"text": "after",
"type": "unstyled",
},
],
"entityMap": {
"0": {
"data": {
"format": "left",
"alt": "an image",
"id": "1",
"src": "/media/not-found",
},
"mutability": "IMMUTABLE",
"type": "IMAGE",
}
},
},
)
|
<p>before <embed embedtype="image" alt="an image" id="1" format="left" /> after</p>
| 9 | 111 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_image_inside_paragraph(self):
# In Draftail's data model, images are block-level elements and therefore
# split up preceding / following text into their own paragraphs
converter = ContentstateConverter(features=["image"])
result = json.loads(
converter.from_database_format(
)
)
self.assertContentStateEqual(
result,
{
"blocks": [
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [],
"depth": 0,
"text": "before",
"type": "unstyled",
},
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [{"key": 0, "offset": 0, "length": 1}],
"depth": 0,
"text": " ",
"type": "atomic",
},
{
"key": "00000",
"inlineStyleRanges": [],
"entityRanges": [],
"depth": 0,
"text": "after",
"type": "unstyled",
},
],
"entityMap": {
"0": {
"data": {
"format": "left",
"alt": "an image",
"id": "1",
"src": "/media/not-found",
},
"mutability": "IMMUTABLE",
"type": "IMAGE",
}
},
},
)
```
###Assistant :
<p>before <embed embedtype="image" alt="an image" id="1" format="left" /> after</p>
|
1,104 | def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
if loss not in ("squared_hinge", "log"):
raise ValueError('loss type not in ("squared_hinge", "log")')
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
| Return the lowest bound for C.
The lower bound for C is computed such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
| 190 | 93 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def l1_min_c(X, y, *, loss="squared_hinge", fit_intercept=True, intercept_scaling=1.0):
if loss not in ("squared_hinge", "log"):
raise ValueError('loss type not in ("squared_hinge", "log")')
X = check_array(X, accept_sparse="csc")
check_consistent_length(X, y)
Y = LabelBinarizer(neg_label=-1).fit_transform(y).T
# maximum absolute value over classes and features
den = np.max(np.abs(safe_sparse_dot(Y, X)))
if fit_intercept:
bias = np.full(
(np.size(y), 1), intercept_scaling, dtype=np.array(intercept_scaling).dtype
)
den = max(den, abs(np.dot(Y, bias)).max())
if den == 0.0:
raise ValueError(
"Ill-posed l1_min_c calculation: l1 will always "
"select zero coefficients for this data"
)
if loss == "squared_hinge":
return 0.5 / den
else: # loss == 'log':
return 2.0 / den
```
###Assistant : Return the lowest bound for C.
The lower bound for C is computed such that for C in (l1_min_C, infinity)
the model is guaranteed not to be empty. This applies to l1 penalized
classifiers, such as LinearSVC with penalty='l1' and
linear_model.LogisticRegression with penalty='l1'.
This value is valid if class_weight parameter in fit() is not set.
Parameters
----------
X : {array-like, sparse matrix} of shape (n_samples, n_features)
Training vector, where `n_samples` is the number of samples and
`n_features` is the number of features.
y : array-like of shape (n_samples,)
Target vector relative to X.
loss : {'squared_hinge', 'log'}, default='squared_hinge'
Specifies the loss function.
With 'squared_hinge' it is the squared hinge loss (a.k.a. L2 loss).
With 'log' it is the loss of logistic regression models.
fit_intercept : bool, default=True
Specifies if the intercept should be fitted by the model.
It must match the fit() method parameter.
intercept_scaling : float, default=1.0
When fit_intercept is True, instance vector x becomes
[x, intercept_scaling],
i.e. a "synthetic" feature with constant value equals to
intercept_scaling is appended to the instance vector.
It must match the fit() method parameter.
Returns
-------
l1_min_c : float
Minimum value for C.
|
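A hedged usage sketch via the public scikit-learn API (`sklearn.svm.l1_min_c`); the iris dataset is only illustrative:
```Python
# Usage sketch, assuming the public sklearn.svm.l1_min_c API; iris is illustrative.
from sklearn.datasets import load_iris
from sklearn.svm import l1_min_c

X, y = load_iris(return_X_y=True)
# Smallest C for which an l1-penalized logistic regression is guaranteed non-empty
c_min = l1_min_c(X, y, loss="log")
print(c_min)
```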
1,105 | def _mysql_tables_where_indexes_already_present(conn):
to_check = [
('xcom', 'idx_xcom_task_instance'),
('task_reschedule', 'idx_task_reschedule_dag_run'),
('task_fail', 'idx_task_fail_task_instance'),
]
tables = set()
for tbl, idx in to_check:
if conn.execute(f"show indexes from {tbl} where Key_name = '{idx}'").first():
tables.add(tbl)
return tables
|
If the user downgraded and is upgrading again, we have to check for existing
indexes on mysql because we can't (and don't) drop them as part of the
downgrade.
| 28 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _mysql_tables_where_indexes_already_present(conn):
to_check = [
('xcom', 'idx_xcom_task_instance'),
('task_reschedule', 'idx_task_reschedule_dag_run'),
('task_fail', 'idx_task_fail_task_instance'),
]
tables = set()
for tbl, idx in to_check:
if conn.execute(f"show indexes from {tbl} where Key_name = '{idx}'").first():
tables.add(tbl)
return tables
```
###Assistant :
If the user downgraded and is upgrading again, we have to check for existing
indexes on mysql because we can't (and don't) drop them as part of the
downgrade.
|
1,106 | def test_webclient_resolves_with_client_resource(self):
for resource_name_order_list in [
["webclient", "client"],
["client", "webclient"],
]:
# Create a dictionary from path regex -> resource
resource_dict: Dict[str, Resource] = {}
for resource_name in resource_name_order_list:
resource_dict.update(
SynapseHomeServer._configure_named_resource(self.hs, resource_name)
)
# Create a root resource which ties the above resources together into one
root_resource = Resource()
create_resource_tree(resource_dict, root_resource)
# Create a site configured with this resource to make HTTP requests against
listener_config = ListenerConfig(
port=8008,
bind_addresses=["127.0.0.1"],
type="http",
http_options=HttpListenerConfig(
resources=[HttpResourceConfig(names=resource_name_order_list)]
),
)
test_site = SynapseSite(
logger_name="synapse.access.http.fake",
site_tag=self.hs.config.server.server_name,
config=listener_config,
resource=root_resource,
server_version_string="1",
max_request_body_size=1234,
reactor=self.reactor,
)
# Attempt to make requests to endpoints on both the webclient and client resources
# on test_site.
self._request_client_and_webclient_resources(test_site)
|
Tests that both client and webclient resources can be accessed simultaneously.
This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.
| 21 | 103 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_webclient_resolves_with_client_resource(self):
for resource_name_order_list in [
["webclient", "client"],
["client", "webclient"],
]:
# Create a dictionary from path regex -> resource
resource_dict: Dict[str, Resource] = {}
for resource_name in resource_name_order_list:
resource_dict.update(
SynapseHomeServer._configure_named_resource(self.hs, resource_name)
)
# Create a root resource which ties the above resources together into one
root_resource = Resource()
create_resource_tree(resource_dict, root_resource)
# Create a site configured with this resource to make HTTP requests against
listener_config = ListenerConfig(
port=8008,
bind_addresses=["127.0.0.1"],
type="http",
http_options=HttpListenerConfig(
resources=[HttpResourceConfig(names=resource_name_order_list)]
),
)
test_site = SynapseSite(
logger_name="synapse.access.http.fake",
site_tag=self.hs.config.server.server_name,
config=listener_config,
resource=root_resource,
server_version_string="1",
max_request_body_size=1234,
reactor=self.reactor,
)
# Attempt to make requests to endpoints on both the webclient and client resources
# on test_site.
self._request_client_and_webclient_resources(test_site)
```
###Assistant :
Tests that both client and webclient resources can be accessed simultaneously.
This is a regression test created in response to https://github.com/matrix-org/synapse/issues/11763.
|
1,107 | def unregister_pickle_by_value(module):
if not isinstance(module, types.ModuleType):
raise ValueError(f"Input should be a module object, got {str(module)} instead")
if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
raise ValueError(f"{module} is not registered for pickle by value")
else:
_PICKLE_BY_VALUE_MODULES.remove(module.__name__)
| Unregister that the input module should be pickled by value. | 10 | 32 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def unregister_pickle_by_value(module):
if not isinstance(module, types.ModuleType):
raise ValueError(f"Input should be a module object, got {str(module)} instead")
if module.__name__ not in _PICKLE_BY_VALUE_MODULES:
raise ValueError(f"{module} is not registered for pickle by value")
else:
_PICKLE_BY_VALUE_MODULES.remove(module.__name__)
```
###Assistant : Unregister that the input module should be pickled by value.
|
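A hedged usage sketch assuming the cloudpickle `register_pickle_by_value` / `unregister_pickle_by_value` pair this helper belongs to; `my_local_module` and `some_function` are hypothetical names:
```Python
# Usage sketch; my_local_module / some_function are hypothetical and stand in for
# code that is not importable on the deserializing side.
import cloudpickle
import my_local_module

cloudpickle.register_pickle_by_value(my_local_module)    # pickle the module's code by value
payload = cloudpickle.dumps(my_local_module.some_function)
cloudpickle.unregister_pickle_by_value(my_local_module)  # revert to pickling by reference
```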
1,108 | def test_get_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.save()
# Get
response = self.client.get(
reverse("wagtailimages:url_generator", args=(self.image.id,))
)
# Check response
self.assertRedirects(response, reverse("wagtailadmin_home"))
|
This tests that the view returns a "permission denied" redirect if a user without correct
permissions attempts to access it
| 20 | 30 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_get_bad_permissions(self):
# Remove privileges from user
self.user.is_superuser = False
self.user.user_permissions.add(
Permission.objects.get(
content_type__app_label="wagtailadmin", codename="access_admin"
)
)
self.user.save()
# Get
response = self.client.get(
reverse("wagtailimages:url_generator", args=(self.image.id,))
)
# Check response
self.assertRedirects(response, reverse("wagtailadmin_home"))
```
###Assistant :
This tests that the view returns a "permission denied" redirect if a user without correct
permissions attempts to access it
|
1,109 | def get_archive_formats():
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
| Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
| 21 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_archive_formats():
formats = [(name, registry[2]) for name, registry in
_ARCHIVE_FORMATS.items()]
formats.sort()
return formats
```
###Assistant : Returns a list of supported formats for archiving and unarchiving.
Each element of the returned sequence is a tuple (name, description)
|
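A usage sketch with the standard-library counterpart, `shutil.get_archive_formats`, which returns the same `(name, description)` tuples:
```Python
# Usage sketch via the standard-library shutil.get_archive_formats
import shutil

for name, description in shutil.get_archive_formats():
    print(f"{name}: {description}")
# e.g. "gztar: gzip'ed tar-file", "tar: uncompressed tar file", "zip: ZIP file", ...
```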
1,110 | def _handle_transforms(self, element, mobject):
if element.hasAttribute("x") and element.hasAttribute("y"):
x = self._attribute_to_float(element.getAttribute("x"))
# Flip y
y = -self._attribute_to_float(element.getAttribute("y"))
mobject.shift(x * RIGHT + y * UP)
transform_attr_value = element.getAttribute("transform")
# parse the various transforms in the attribute value
transform_names = ["matrix", "translate", "scale", "rotate", "skewX", "skewY"]
# Borrowed/Inspired from:
# https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75
# match any SVG transformation with its parameter (until final parenthesis)
# [^)]* == anything but a closing parenthesis
# '|'.join == OR-list of SVG transformations
transform_regex = "|".join([x + r"[^)]*\)" for x in transform_names])
transforms = re.findall(transform_regex, transform_attr_value)
number_regex = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"
for t in transforms:
op_name, op_args = t.split("(")
op_name = op_name.strip()
op_args = [float(x) for x in re.findall(number_regex, op_args)]
if op_name == "matrix":
transform_args = np.array(op_args).reshape([3, 2])
x = transform_args[2][0]
y = -transform_args[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform_args[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
if config["renderer"] == "opengl":
mob.points = np.dot(mob.points, matrix)
else:
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
elif op_name == "scale":
scale_values = op_args
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
elif op_name == "translate":
if len(op_args) == 2:
x, y = op_args
else:
x = op_args
y = 0
mobject.shift(x * RIGHT + y * DOWN)
else:
# TODO: handle rotate, skewX and skewY
# for now adding a warning message
logger.warning(
"Handling of %s transform is not supported yet!",
op_name,
)
| Applies the SVG transform to the specified mobject. Transforms include:
``matrix``, ``translate``, and ``scale``.
Parameters
----------
element : :class:`minidom.Element`
The transform command to perform
mobject : :class:`Mobject`
The Mobject to transform.
| 31 | 245 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _handle_transforms(self, element, mobject):
if element.hasAttribute("x") and element.hasAttribute("y"):
x = self._attribute_to_float(element.getAttribute("x"))
# Flip y
y = -self._attribute_to_float(element.getAttribute("y"))
mobject.shift(x * RIGHT + y * UP)
transform_attr_value = element.getAttribute("transform")
# parse the various transforms in the attribute value
transform_names = ["matrix", "translate", "scale", "rotate", "skewX", "skewY"]
# Borrowed/Inspired from:
# https://github.com/cjlano/svg/blob/3ea3384457c9780fa7d67837c9c5fd4ebc42cb3b/svg/svg.py#L75
# match any SVG transformation with its parameter (until final parenthesis)
# [^)]* == anything but a closing parenthesis
# '|'.join == OR-list of SVG transformations
transform_regex = "|".join([x + r"[^)]*\)" for x in transform_names])
transforms = re.findall(transform_regex, transform_attr_value)
number_regex = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"
for t in transforms:
op_name, op_args = t.split("(")
op_name = op_name.strip()
op_args = [float(x) for x in re.findall(number_regex, op_args)]
if op_name == "matrix":
transform_args = np.array(op_args).reshape([3, 2])
x = transform_args[2][0]
y = -transform_args[2][1]
matrix = np.identity(self.dim)
matrix[:2, :2] = transform_args[:2, :]
matrix[1] *= -1
matrix[:, 1] *= -1
for mob in mobject.family_members_with_points():
if config["renderer"] == "opengl":
mob.points = np.dot(mob.points, matrix)
else:
mob.points = np.dot(mob.points, matrix)
mobject.shift(x * RIGHT + y * UP)
elif op_name == "scale":
scale_values = op_args
if len(scale_values) == 2:
scale_x, scale_y = scale_values
mobject.scale(np.array([scale_x, scale_y, 1]), about_point=ORIGIN)
elif len(scale_values) == 1:
scale = scale_values[0]
mobject.scale(np.array([scale, scale, 1]), about_point=ORIGIN)
elif op_name == "translate":
if len(op_args) == 2:
x, y = op_args
else:
x = op_args
y = 0
mobject.shift(x * RIGHT + y * DOWN)
else:
# TODO: handle rotate, skewX and skewY
# for now adding a warning message
logger.warning(
"Handling of %s transform is not supported yet!",
op_name,
)
```
###Assistant : Applies the SVG transform to the specified mobject. Transforms include:
``matrix``, ``translate``, and ``scale``.
Parameters
----------
element : :class:`minidom.Element`
The transform command to perform
mobject : :class:`Mobject`
The Mobject to transform.
|
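A stand-alone sketch of the regex-based transform parsing described above, using only `re`; the transform string is an invented example:
```Python
# Stand-alone sketch of the transform-attribute parsing; the input string is invented.
import re

transform_attr_value = "translate(10, 20) scale(2) rotate(45)"
transform_names = ["matrix", "translate", "scale", "rotate", "skewX", "skewY"]
transform_regex = "|".join([name + r"[^)]*\)" for name in transform_names])
number_regex = r"[-+]?(?:\d+(?:\.\d*)?|\.\d+)(?:[eE][-+]?\d+)?"

for t in re.findall(transform_regex, transform_attr_value):
    op_name, op_args = t.split("(")
    args = [float(x) for x in re.findall(number_regex, op_args)]
    print(op_name.strip(), args)
# translate [10.0, 20.0]
# scale [2.0]
# rotate [45.0]
```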
1,111 | def pro_data_fixture():
return json.loads(load_fixture("data.json", "airvisual_pro"))
@pytest.fixture(name="pro") | Define an update coordinator data example for the Pro. | 9 | 6 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def pro_data_fixture():
return json.loads(load_fixture("data.json", "airvisual_pro"))
@pytest.fixture(name="pro")
```
###Assistant : Define an update coordinator data example for the Pro.
|
1,112 | def fetch_species_distributions(*, data_home=None, download_if_missing=True):
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(
x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05,
)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X: # samples.zip is a valid npz
for f in X.files:
fhandle = BytesIO(X[f])
if "train" in f:
train = _load_csv(fhandle)
if "test" in f:
test = _load_csv(fhandle)
remove(samples_path)
logger.info(
"Downloading coverage data from %s to %s" % (COVERAGES.url, data_home)
)
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X: # coverages.zip is a valid npz
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(" - converting {}".format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
| Loader for species distribution dataset from Phillips et. al. (2006).
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
| 310 | 179 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def fetch_species_distributions(*, data_home=None, download_if_missing=True):
data_home = get_data_home(data_home)
if not exists(data_home):
makedirs(data_home)
# Define parameters for the data files. These should not be changed
# unless the data model changes. They will be saved in the npz file
# with the downloaded data.
extra_params = dict(
x_left_lower_corner=-94.8,
Nx=1212,
y_left_lower_corner=-56.05,
Ny=1592,
grid_size=0.05,
)
dtype = np.int16
archive_path = _pkl_filepath(data_home, DATA_ARCHIVE_NAME)
if not exists(archive_path):
if not download_if_missing:
raise IOError("Data not found and `download_if_missing` is False")
logger.info("Downloading species data from %s to %s" % (SAMPLES.url, data_home))
samples_path = _fetch_remote(SAMPLES, dirname=data_home)
with np.load(samples_path) as X: # samples.zip is a valid npz
for f in X.files:
fhandle = BytesIO(X[f])
if "train" in f:
train = _load_csv(fhandle)
if "test" in f:
test = _load_csv(fhandle)
remove(samples_path)
logger.info(
"Downloading coverage data from %s to %s" % (COVERAGES.url, data_home)
)
coverages_path = _fetch_remote(COVERAGES, dirname=data_home)
with np.load(coverages_path) as X: # coverages.zip is a valid npz
coverages = []
for f in X.files:
fhandle = BytesIO(X[f])
logger.debug(" - converting {}".format(f))
coverages.append(_load_coverage(fhandle))
coverages = np.asarray(coverages, dtype=dtype)
remove(coverages_path)
bunch = Bunch(coverages=coverages, test=test, train=train, **extra_params)
joblib.dump(bunch, archive_path, compress=9)
else:
bunch = joblib.load(archive_path)
return bunch
```
###Assistant : Loader for species distribution dataset from Phillips et. al. (2006).
Read more in the :ref:`User Guide <datasets>`.
Parameters
----------
data_home : str, default=None
Specify another download and cache folder for the datasets. By default
all scikit-learn data is stored in '~/scikit_learn_data' subfolders.
download_if_missing : bool, default=True
If False, raise an IOError if the data is not locally available
instead of trying to download the data from the source site.
Returns
-------
data : :class:`~sklearn.utils.Bunch`
Dictionary-like object, with the following attributes.
coverages : array, shape = [14, 1592, 1212]
These represent the 14 features measured
at each point of the map grid.
The latitude/longitude values for the grid are discussed below.
Missing data is represented by the value -9999.
train : record array, shape = (1624,)
The training points for the data. Each point has three fields:
- train['species'] is the species name
- train['dd long'] is the longitude, in degrees
- train['dd lat'] is the latitude, in degrees
test : record array, shape = (620,)
The test points for the data. Same format as the training data.
Nx, Ny : integers
The number of longitudes (x) and latitudes (y) in the grid
x_left_lower_corner, y_left_lower_corner : floats
The (x,y) position of the lower-left corner, in degrees
grid_size : float
The spacing between points of the grid, in degrees
Notes
-----
This dataset represents the geographic distribution of species.
The dataset is provided by Phillips et. al. (2006).
The two species are:
- `"Bradypus variegatus"
<http://www.iucnredlist.org/details/3038/0>`_ ,
the Brown-throated Sloth.
- `"Microryzomys minutus"
<http://www.iucnredlist.org/details/13408/0>`_ ,
also known as the Forest Small Rice Rat, a rodent that lives in Peru,
Colombia, Ecuador, and Venezuela.
- For an example of using this dataset with scikit-learn, see
:ref:`examples/applications/plot_species_distribution_modeling.py
<sphx_glr_auto_examples_applications_plot_species_distribution_modeling.py>`.
References
----------
* `"Maximum entropy modeling of species geographic distributions"
<http://rob.schapire.net/papers/ecolmod.pdf>`_
S. J. Phillips, R. P. Anderson, R. E. Schapire - Ecological Modelling,
190:231-259, 2006.
|
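A usage sketch via the public scikit-learn API; the first call downloads the data into the cache folder and later calls load the pickled archive:
```Python
# Usage sketch; shapes match the docstring above.
from sklearn.datasets import fetch_species_distributions

data = fetch_species_distributions()
print(data.coverages.shape)  # (14, 1592, 1212)
print(data.train.shape)      # (1624,)
print(data.test.shape)       # (620,)
```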
1,113 | def set_split_factor(factor, dev=None):
assert 0 <= factor
global split_factors
dev = ivy.default(dev, default_device())
split_factors[dev] = factor
# noinspection PyShadowingNames |
Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the
device across the codebase.
:param factor: The factor to set the device-specific split factor to.
:type factor: float
:param dev: The device to set the split factor for. Sets the default device by default.
:type dev: str, optional
| 59 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def set_split_factor(factor, dev=None):
assert 0 <= factor
global split_factors
dev = ivy.default(dev, default_device())
split_factors[dev] = factor
# noinspection PyShadowingNames
```
###Assistant :
Set the global split factor for a given device, which can be used to scale batch splitting chunk sizes for the
device across the codebase.
:param factor: The factor to set the device-specific split factor to.
:type factor: float
:param dev: The device to set the split factor for. Sets the default device by default.
:type dev: str, optional
|
1,114 | def run(self, test, compileflags=None, out=None, clear_globs=True):
self.test = test
# Remove ``` from the end of example, which may appear in Markdown
# files
for example in test.examples:
example.want = example.want.replace('```\n', '')
example.exc_msg = example.exc_msg and example.exc_msg.replace('```\n', '')
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Fail for deprecation warnings
with raise_on_deprecated():
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
monkeypatched_methods = [
'patched_linecache_getlines',
'run',
'record_outcome'
]
for method in monkeypatched_methods:
oldname = '_DocTestRunner__' + method
newname = '_SymPyDocTestRunner__' + method
setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname))
|
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
| 111 | 197 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def run(self, test, compileflags=None, out=None, clear_globs=True):
self.test = test
# Remove ``` from the end of example, which may appear in Markdown
# files
for example in test.examples:
example.want = example.want.replace('```\n', '')
example.exc_msg = example.exc_msg and example.exc_msg.replace('```\n', '')
if compileflags is None:
compileflags = pdoctest._extract_future_flags(test.globs)
save_stdout = sys.stdout
if out is None:
out = save_stdout.write
sys.stdout = self._fakeout
# Patch pdb.set_trace to restore sys.stdout during interactive
# debugging (so it's not still redirected to self._fakeout).
# Note that the interactive output will go to *our*
# save_stdout, even if that's not the real sys.stdout; this
# allows us to write test cases for the set_trace behavior.
save_set_trace = pdb.set_trace
self.debugger = pdoctest._OutputRedirectingPdb(save_stdout)
self.debugger.reset()
pdb.set_trace = self.debugger.set_trace
# Patch linecache.getlines, so we can see the example's source
# when we're inside the debugger.
self.save_linecache_getlines = pdoctest.linecache.getlines
linecache.getlines = self.__patched_linecache_getlines
# Fail for deprecation warnings
with raise_on_deprecated():
try:
return self.__run(test, compileflags, out)
finally:
sys.stdout = save_stdout
pdb.set_trace = save_set_trace
linecache.getlines = self.save_linecache_getlines
if clear_globs:
test.globs.clear()
# We have to override the name mangled methods.
monkeypatched_methods = [
'patched_linecache_getlines',
'run',
'record_outcome'
]
for method in monkeypatched_methods:
oldname = '_DocTestRunner__' + method
newname = '_SymPyDocTestRunner__' + method
setattr(SymPyDocTestRunner, newname, getattr(DocTestRunner, oldname))
```
###Assistant :
Run the examples in ``test``, and display the results using the
writer function ``out``.
The examples are run in the namespace ``test.globs``. If
``clear_globs`` is true (the default), then this namespace will
be cleared after the test runs, to help with garbage
collection. If you would like to examine the namespace after
the test completes, then use ``clear_globs=False``.
``compileflags`` gives the set of flags that should be used by
the Python compiler when running the examples. If not
specified, then it will default to the set of future-import
flags that apply to ``globs``.
The output of each example is checked using
``SymPyDocTestRunner.check_output``, and the results are
formatted by the ``SymPyDocTestRunner.report_*`` methods.
|
1,115 | def _fetch_all_variants(client, variables={}, permissions=None):
query =
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariants"]
|
query fetchAllVariants($channel: String) {
productVariants(first: 10, channel: $channel) {
totalCount
edges {
node {
id
}
}
}
}
| 19 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _fetch_all_variants(client, variables={}, permissions=None):
query =
response = client.post_graphql(
query, variables, permissions=permissions, check_no_permissions=False
)
content = get_graphql_content(response)
return content["data"]["productVariants"]
```
###Assistant :
query fetchAllVariants($channel: String) {
productVariants(first: 10, channel: $channel) {
totalCount
edges {
node {
id
}
}
}
}
|
1,116 | def astar_torus(grid, start_node, goal_node):
colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']
levels = [0, 1, 2, 3, 4, 5, 6, 7]
cmap, norm = from_levels_and_colors(levels, colors)
grid[start_node] = 4
grid[goal_node] = 5
parent_map = [[() for _ in range(M)] for _ in range(M)]
heuristic_map = calc_heuristic_map(M, goal_node)
explored_heuristic_map = np.full((M, M), np.inf)
distance_map = np.full((M, M), np.inf)
explored_heuristic_map[start_node] = heuristic_map[start_node]
distance_map[start_node] = 0
while True:
grid[start_node] = 4
grid[goal_node] = 5
current_node = np.unravel_index(
np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape)
min_distance = np.min(explored_heuristic_map)
if (current_node == goal_node) or np.isinf(min_distance):
break
grid[current_node] = 2
explored_heuristic_map[current_node] = np.inf
i, j = current_node[0], current_node[1]
neighbors = find_neighbors(i, j)
for neighbor in neighbors:
if grid[neighbor] == 0 or grid[neighbor] == 5:
distance_map[neighbor] = distance_map[current_node] + 1
explored_heuristic_map[neighbor] = heuristic_map[neighbor]
parent_map[neighbor[0]][neighbor[1]] = current_node
grid[neighbor] = 3
if np.isinf(explored_heuristic_map[goal_node]):
route = []
print("No route found.")
else:
route = [goal_node]
while parent_map[route[0][0]][route[0][1]] != ():
route.insert(0, parent_map[route[0][0]][route[0][1]])
print("The route found covers %d grid cells." % len(route))
for i in range(1, len(route)):
grid[route[i]] = 6
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.imshow(grid, cmap=cmap, norm=norm, interpolation=None)
plt.show()
plt.pause(1e-2)
return route
|
Finds a path between an initial and goal joint configuration using
the A* Algorithm on a toroidal grid.
Args:
grid: An occupancy grid (ndarray)
start_node: Initial joint configuration (tuple)
goal_node: Goal joint configuration (tuple)
Returns:
Obstacle-free route in joint space from start_node to goal_node
| 44 | 192 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def astar_torus(grid, start_node, goal_node):
colors = ['white', 'black', 'red', 'pink', 'yellow', 'green', 'orange']
levels = [0, 1, 2, 3, 4, 5, 6, 7]
cmap, norm = from_levels_and_colors(levels, colors)
grid[start_node] = 4
grid[goal_node] = 5
parent_map = [[() for _ in range(M)] for _ in range(M)]
heuristic_map = calc_heuristic_map(M, goal_node)
explored_heuristic_map = np.full((M, M), np.inf)
distance_map = np.full((M, M), np.inf)
explored_heuristic_map[start_node] = heuristic_map[start_node]
distance_map[start_node] = 0
while True:
grid[start_node] = 4
grid[goal_node] = 5
current_node = np.unravel_index(
np.argmin(explored_heuristic_map, axis=None), explored_heuristic_map.shape)
min_distance = np.min(explored_heuristic_map)
if (current_node == goal_node) or np.isinf(min_distance):
break
grid[current_node] = 2
explored_heuristic_map[current_node] = np.inf
i, j = current_node[0], current_node[1]
neighbors = find_neighbors(i, j)
for neighbor in neighbors:
if grid[neighbor] == 0 or grid[neighbor] == 5:
distance_map[neighbor] = distance_map[current_node] + 1
explored_heuristic_map[neighbor] = heuristic_map[neighbor]
parent_map[neighbor[0]][neighbor[1]] = current_node
grid[neighbor] = 3
if np.isinf(explored_heuristic_map[goal_node]):
route = []
print("No route found.")
else:
route = [goal_node]
while parent_map[route[0][0]][route[0][1]] != ():
route.insert(0, parent_map[route[0][0]][route[0][1]])
print("The route found covers %d grid cells." % len(route))
for i in range(1, len(route)):
grid[route[i]] = 6
plt.cla()
# for stopping simulation with the esc key.
plt.gcf().canvas.mpl_connect('key_release_event',
lambda event: [exit(0) if event.key == 'escape' else None])
plt.imshow(grid, cmap=cmap, norm=norm, interpolation=None)
plt.show()
plt.pause(1e-2)
return route
```
###Assistant :
Finds a path between an initial and goal joint configuration using
the A* Algorithm on a toroidal grid.
Args:
grid: An occupancy grid (ndarray)
start_node: Initial joint configuration (tuple)
goal_node: Goal joint configuration (tuple)
Returns:
Obstacle-free route in joint space from start_node to goal_node
|
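The routine above relies on a `find_neighbors` helper that is not shown; a hypothetical sketch for an `M x M` toroidal grid (wrap-around indexing is what makes the grid a torus) could look like this:
```Python
# Hypothetical sketch of the assumed find_neighbors helper: 4-connected neighbors
# on an M x M grid with wrap-around (toroidal) indexing. The value of M is illustrative.
M = 100

def find_neighbors(i, j):
    return [((i - 1) % M, j),  # up, wrapping past row 0
            ((i + 1) % M, j),  # down, wrapping past row M - 1
            (i, (j - 1) % M),  # left, wrapping past column 0
            (i, (j + 1) % M)]  # right, wrapping past column M - 1

print(find_neighbors(0, 0))  # [(99, 0), (1, 0), (0, 99), (0, 1)]
```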
1,117 | def root_node(self) -> Optional[str]:
if len(self.graph.nodes) < 1:
return None
return list(self.graph.nodes)[0] # List conversion is required, see networkx docs
|
Returns the root node of the pipeline's graph.
| 8 | 20 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def root_node(self) -> Optional[str]:
if len(self.graph.nodes) < 1:
return None
return list(self.graph.nodes)[0] # List conversion is required, see networkx docs
```
###Assistant :
Returns the root node of the pipeline's graph.
|
1,118 | def as_coefficients_dict(self, *syms):
if not syms:
d = defaultdict(list)
for ai in self.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
di = defaultdict(int)
di.update(d)
return di
else:
d = defaultdict(list)
ind, dep = self.as_independent(*syms, as_Add=True)
for i in Add.make_args(dep):
c, x = i.as_independent(*syms, as_Add=False)
d[x].append(c)
d = {k: Add(*d[k]) for k in d}
d.update({S.One: ind})
return d
| Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
If symbols `syms` are provided, any multiplicative terms
independent of them will be considered a coefficient and a
regular dictionary of syms-dependent generators as keys and
their corresponding coefficients as values will be returned.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
>>> (3*exp(x)*x + a/x + 2).as_coefficients_dict(x)
{1: 2, 1/x: a, x*exp(x): 3}
| 121 | 71 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def as_coefficients_dict(self, *syms):
if not syms:
d = defaultdict(list)
for ai in self.args:
c, m = ai.as_coeff_Mul()
d[m].append(c)
for k, v in d.items():
if len(v) == 1:
d[k] = v[0]
else:
d[k] = Add(*v)
di = defaultdict(int)
di.update(d)
return di
else:
d = defaultdict(list)
ind, dep = self.as_independent(*syms, as_Add=True)
for i in Add.make_args(dep):
c, x = i.as_independent(*syms, as_Add=False)
d[x].append(c)
d = {k: Add(*d[k]) for k in d}
d.update({S.One: ind})
return d
```
###Assistant : Return a dictionary mapping terms to their Rational coefficient.
Since the dictionary is a defaultdict, inquiries about terms which
were not present will return a coefficient of 0. If an expression is
not an Add it is considered to have a single term.
If symbols `syms` are provided, any multiplicative terms
independent of them will be considered a coefficient and a
regular dictionary of syms-dependent generators as keys and
their corresponding coefficients as values will be returned.
Examples
========
>>> from sympy import exp
>>> from sympy.abc import a, x
>>> (3*x + a*x + 4).as_coefficients_dict()
{1: 4, x: 3, a*x: 1}
>>> _[a]
0
>>> (3*a*x).as_coefficients_dict()
{a*x: 3}
>>> (3*exp(x)*x + a/x + 2).as_coefficients_dict(x)
{1: 2, 1/x: a, x*exp(x): 3}
|
1,119 | def verify_emoji_code_foreign_keys(self) -> None:
dct = {}
for row in RealmEmoji.objects.all():
dct[row.id] = row
if not dct:
raise AssertionError("test needs RealmEmoji rows")
count = 0
for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI):
realm_emoji_id = int(row.emoji_code)
assert realm_emoji_id in dct
self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)
self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)
count += 1
for row in UserStatus.objects.filter(reaction_type=UserStatus.REALM_EMOJI):
realm_emoji_id = int(row.emoji_code)
assert realm_emoji_id in dct
self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)
self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)
count += 1
if count == 0:
raise AssertionError("test is meaningless without any pertinent rows")
|
DB tables that refer to RealmEmoji use int(emoji_code) as the
foreign key. Those tables tend to de-normalize emoji_name due
to our inheritance-based setup. This helper makes sure those
invariants are intact, which is particularly tricky during
the import/export process (or during conversions from things
like Slack/RocketChat/MatterMost/etc.).
| 46 | 73 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def verify_emoji_code_foreign_keys(self) -> None:
dct = {}
for row in RealmEmoji.objects.all():
dct[row.id] = row
if not dct:
raise AssertionError("test needs RealmEmoji rows")
count = 0
for row in Reaction.objects.filter(reaction_type=Reaction.REALM_EMOJI):
realm_emoji_id = int(row.emoji_code)
assert realm_emoji_id in dct
self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)
self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)
count += 1
for row in UserStatus.objects.filter(reaction_type=UserStatus.REALM_EMOJI):
realm_emoji_id = int(row.emoji_code)
assert realm_emoji_id in dct
self.assertEqual(dct[realm_emoji_id].name, row.emoji_name)
self.assertEqual(dct[realm_emoji_id].realm_id, row.user_profile.realm_id)
count += 1
if count == 0:
raise AssertionError("test is meaningless without any pertinent rows")
```
###Assistant :
DB tables that refer to RealmEmoji use int(emoji_code) as the
foreign key. Those tables tend to de-normalize emoji_name due
to our inheritance-based setup. This helper makes sure those
invariants are intact, which is particularly tricky during
the import/export process (or during conversions from things
like Slack/RocketChat/MatterMost/etc.).
|
1,120 | def format_string_to_json(balance_info):
# e.g. balance_info = "Working Account|KES|481000.00|481000.00|0.00|0.00"
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split("|")
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
available_balance=fmt_money(account_info[3], currency="KES"),
reserved_balance=fmt_money(account_info[4], currency="KES"),
uncleared_balance=fmt_money(account_info[5], currency="KES"),
)
return dumps(balance_dict)
|
Format string to json.
e.g:
=> {'Working Account': {'current_balance': '481000.00',
'available_balance': '481000.00',
'reserved_balance': '0.00',
'uncleared_balance': '0.00'}}
| 16 | 28 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def format_string_to_json(balance_info):
# e.g. balance_info = "Working Account|KES|481000.00|481000.00|0.00|0.00"
balance_dict = frappe._dict()
for account_info in balance_info.split("&"):
account_info = account_info.split("|")
balance_dict[account_info[0]] = dict(
current_balance=fmt_money(account_info[2], currency="KES"),
available_balance=fmt_money(account_info[3], currency="KES"),
reserved_balance=fmt_money(account_info[4], currency="KES"),
uncleared_balance=fmt_money(account_info[5], currency="KES"),
)
return dumps(balance_dict)
```
###Assistant :
Format string to json.
e.g:
=> {'Working Account': {'current_balance': '481000.00',
'available_balance': '481000.00',
'reserved_balance': '0.00',
'uncleared_balance': '0.00'}}
|
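A stand-alone sketch of the same parsing without the frappe helpers (a plain dict instead of `frappe._dict`, and skipping the `fmt_money` currency formatting):
```Python
# Stand-alone sketch of the parsing above, without frappe._dict or fmt_money.
from json import dumps

def parse_balance(balance_info: str) -> str:
    result = {}
    for account_info in balance_info.split("&"):
        parts = account_info.split("|")
        result[parts[0]] = {
            "current_balance": parts[2],
            "available_balance": parts[3],
            "reserved_balance": parts[4],
            "uncleared_balance": parts[5],
        }
    return dumps(result)

print(parse_balance("Working Account|KES|481000.00|481000.00|0.00|0.00"))
# {"Working Account": {"current_balance": "481000.00", "available_balance": "481000.00",
#  "reserved_balance": "0.00", "uncleared_balance": "0.00"}}
```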
1,121 | def _validate_path(self) -> list[Any]:
msg = (
"xpath does not return any nodes or attributes. "
"Be sure to specify in `xpath` the parent nodes of "
"children and attributes to parse. "
"If document uses namespaces denoted with "
"xmlns, be sure to define namespaces and "
"use them in xpath."
)
try:
elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
children = [ch for el in elems for ch in el.findall("*")]
attrs = {k: v for el in elems for k, v in el.attrib.items()}
if elems is None:
raise ValueError(msg)
if elems is not None:
if self.elems_only and children == []:
raise ValueError(msg)
if self.attrs_only and attrs == {}:
raise ValueError(msg)
if children == [] and attrs == {}:
raise ValueError(msg)
except (KeyError, SyntaxError):
raise SyntaxError(
"You have used an incorrect or unsupported XPath "
"expression for etree library or you used an "
"undeclared namespace prefix."
)
return elems
|
Notes
-----
`etree` supports limited XPath. If the user attempts a more complex
expression, a SyntaxError will be raised.
| 17 | 148 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _validate_path(self) -> list[Any]:
msg = (
"xpath does not return any nodes or attributes. "
"Be sure to specify in `xpath` the parent nodes of "
"children and attributes to parse. "
"If document uses namespaces denoted with "
"xmlns, be sure to define namespaces and "
"use them in xpath."
)
try:
elems = self.xml_doc.findall(self.xpath, namespaces=self.namespaces)
children = [ch for el in elems for ch in el.findall("*")]
attrs = {k: v for el in elems for k, v in el.attrib.items()}
if elems is None:
raise ValueError(msg)
if elems is not None:
if self.elems_only and children == []:
raise ValueError(msg)
if self.attrs_only and attrs == {}:
raise ValueError(msg)
if children == [] and attrs == {}:
raise ValueError(msg)
except (KeyError, SyntaxError):
raise SyntaxError(
"You have used an incorrect or unsupported XPath "
"expression for etree library or you used an "
"undeclared namespace prefix."
)
return elems
```
###Assistant :
Notes
-----
`etree` supports limited XPath. If the user attempts a more complex
expression, a SyntaxError will be raised.
|
1,122 | def mask(self, row_labels, col_labels):
new_obj = super().mask(row_labels, col_labels)
if isinstance(row_labels, slice) and isinstance(
self._length_cache, ObjectIDType
):
new_obj._length_cache = compute_sliced_len.remote(
row_labels, self._length_cache
)
if isinstance(col_labels, slice) and isinstance(
self._width_cache, ObjectIDType
):
new_obj._width_cache = compute_sliced_len.remote(
col_labels, self._width_cache
)
return new_obj
|
Lazily create a mask that extracts the indices provided.
Parameters
----------
row_labels : list-like, slice or label
The row labels for the rows to extract.
col_labels : list-like, slice or label
The column labels for the columns to extract.
Returns
-------
PandasOnRayDataframePartition
A new ``PandasOnRayDataframePartition`` object.
| 46 | 38 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def mask(self, row_labels, col_labels):
new_obj = super().mask(row_labels, col_labels)
if isinstance(row_labels, slice) and isinstance(
self._length_cache, ObjectIDType
):
new_obj._length_cache = compute_sliced_len.remote(
row_labels, self._length_cache
)
if isinstance(col_labels, slice) and isinstance(
self._width_cache, ObjectIDType
):
new_obj._width_cache = compute_sliced_len.remote(
col_labels, self._width_cache
)
return new_obj
```
###Assistant :
Lazily create a mask that extracts the indices provided.
Parameters
----------
row_labels : list-like, slice or label
The row labels for the rows to extract.
col_labels : list-like, slice or label
The column labels for the columns to extract.
Returns
-------
PandasOnRayDataframePartition
A new ``PandasOnRayDataframePartition`` object.
|
1,123 | def postprocess(data_out, label_list, top_k):
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = float(result_i[index])
output.append(output_i)
return output
|
Postprocess output of network, one image at a time.
Args:
data_out (numpy.ndarray): output data of network.
label_list (list): list of label.
top_k (int): Return top k results.
| 27 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def postprocess(data_out, label_list, top_k):
output = []
for result in data_out:
result_i = softmax(result)
output_i = {}
indexs = np.argsort(result_i)[::-1][0:top_k]
for index in indexs:
label = label_list[index].split(',')[0]
output_i[label] = float(result_i[index])
output.append(output_i)
return output
```
###Assistant :
Postprocess output of network, one image at a time.
Args:
data_out (numpy.ndarray): output data of network.
label_list (list): list of label.
top_k (int): Return top k results.
|
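The `softmax` helper is assumed by the method above; a minimal numpy sketch of it, plus the top-k selection on one invented logits vector:
```Python
# Minimal sketch of the assumed softmax helper and the top-k selection (invented logits).
import numpy as np

def softmax(x):
    e = np.exp(x - np.max(x))
    return e / e.sum()

logits = np.array([2.0, 1.0, 0.1, -1.0])
label_list = ["cat, felis catus", "dog, canis familiaris", "bird, aves", "fish"]
probs = softmax(logits)
top_k = 2
for index in np.argsort(probs)[::-1][:top_k]:
    print(label_list[index].split(',')[0], round(float(probs[index]), 3))
# cat 0.638
# dog 0.235
```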
1,124 | def requests(self):
if hasattr(self, '_requests'):
return self._requests
else:
if not hasattr(self, 'requests_by_class'):
self.requests_by_class = {}
if self.__class__.__name__ not in self.requests_by_class:
self.requests_by_class[self.__class__.__name__] = {}
# we need to copy so that different instances with different (requests) in input do not disturb one another
self._requests = copy.copy(self.requests_by_class[self.__class__.__name__])
return self._requests
|
Get the request dictionary corresponding to this specific class
:return: Returns the requests corresponding to the specific Executor instance class
| 20 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def requests(self):
if hasattr(self, '_requests'):
return self._requests
else:
if not hasattr(self, 'requests_by_class'):
self.requests_by_class = {}
if self.__class__.__name__ not in self.requests_by_class:
self.requests_by_class[self.__class__.__name__] = {}
# we need to copy so that different instances with different (requests) in input do not disturb one another
self._requests = copy.copy(self.requests_by_class[self.__class__.__name__])
return self._requests
```
###Assistant :
Get the request dictionary corresponding to this specific class
:return: Returns the requests corresponding to the specific Executor instance class
|
1,125 | def __call__(self, *args, **kwargs):
r
result = super().__call__(*args, **kwargs)
if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]):
return [res[0] for res in result]
return result
|
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
| 188 | 27 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def __call__(self, *args, **kwargs):
r
result = super().__call__(*args, **kwargs)
if isinstance(args[0], list) and all(isinstance(el, str) for el in args[0]):
return [res[0] for res in result]
return result
```
###Assistant :
Generate the output text(s) using text(s) given as inputs.
Args:
args (`str` or `List[str]`):
Input text for the encoder.
return_tensors (`bool`, *optional*, defaults to `False`):
Whether or not to include the tensors of predictions (as token indices) in the outputs.
return_text (`bool`, *optional*, defaults to `True`):
Whether or not to include the decoded texts in the outputs.
clean_up_tokenization_spaces (`bool`, *optional*, defaults to `False`):
Whether or not to clean up the potential extra spaces in the text output.
truncation (`TruncationStrategy`, *optional*, defaults to `TruncationStrategy.DO_NOT_TRUNCATE`):
The truncation strategy for the tokenization within the pipeline. `TruncationStrategy.DO_NOT_TRUNCATE`
(default) will never truncate, but it is sometimes desirable to truncate the input to fit the model's
max_length instead of throwing an error down the line.
generate_kwargs:
Additional keyword arguments to pass along to the generate method of the model (see the generate method
corresponding to your framework [here](./model#generative-models)).
Return:
A list or a list of list of `dict`: Each result comes as a dictionary with the following keys:
- **generated_text** (`str`, present when `return_text=True`) -- The generated text.
- **generated_token_ids** (`torch.Tensor` or `tf.Tensor`, present when `return_tensors=True`) -- The token
ids of the generated text.
|
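A minimal usage sketch for the pipeline call documented above — it assumes the Hugging Face `transformers` package is installed; the checkpoint name and prompt text are illustrative, not taken from the row itself:
```Python
# Hedged sketch: passing a list of strings returns one generation dict per input.
from transformers import pipeline

# "t5-small" is an illustrative checkpoint choice, not mandated by the row above.
generator = pipeline("text2text-generation", model="t5-small")

outputs = generator(
    ["translate English to German: Hello, how are you?"],
    max_length=32,
)
print(outputs[0]["generated_text"])
```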
1,126 | def rewind_body(prepared_request):
body_seek = getattr(prepared_request.body, "seek", None)
if body_seek is not None and isinstance(
prepared_request._body_position, integer_types
):
try:
body_seek(prepared_request._body_position)
except OSError:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect."
)
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
| Move file pointer back to its recorded starting position
so it can be read again on redirect.
| 17 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def rewind_body(prepared_request):
body_seek = getattr(prepared_request.body, "seek", None)
if body_seek is not None and isinstance(
prepared_request._body_position, integer_types
):
try:
body_seek(prepared_request._body_position)
except OSError:
raise UnrewindableBodyError(
"An error occurred when rewinding request body for redirect."
)
else:
raise UnrewindableBodyError("Unable to rewind request body for redirect.")
```
###Assistant : Move file pointer back to its recorded starting position
so it can be read again on redirect.
|
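A small standard-library sketch of the idea behind the helper above — `_body_position` is mimicked on a plain object here, so this is illustrative rather than the real `requests` internals:
```Python
import io

class FakePreparedRequest:
    """Stands in for requests.PreparedRequest in this illustration."""
    def __init__(self, body, position):
        self.body = body
        self._body_position = position

body = io.BytesIO(b"payload")
body.read()                      # pretend the first request consumed the stream
req = FakePreparedRequest(body, 0)

# What rewind_body effectively does: seek back to the recorded start offset.
req.body.seek(req._body_position)
print(req.body.read())           # b"payload" can now be re-sent on redirect
```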
1,127 | def nested_concat(tensors, new_tensors, padding_index=-100):
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_concat(
t, n, padding_index=padding_index)
for t, n in zip(tensors, new_tensors))
elif isinstance(tensors, paddle.Tensor):
return paddle_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
elif isinstance(tensors, np.ndarray):
return numpy_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
else:
raise TypeError(
f"Unsupported type for concatenation: got {type(tensors)}")
|
Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
nested list/tuples of tensors.
| 25 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def nested_concat(tensors, new_tensors, padding_index=-100):
assert type(tensors) == type(
new_tensors
), f"Expected `tensors` and `new_tensors` to have the same type but found {type(tensors)} and {type(new_tensors)}."
if isinstance(tensors, (list, tuple)):
return type(tensors)(nested_concat(
t, n, padding_index=padding_index)
for t, n in zip(tensors, new_tensors))
elif isinstance(tensors, paddle.Tensor):
return paddle_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
elif isinstance(tensors, np.ndarray):
return numpy_pad_and_concatenate(
tensors, new_tensors, padding_index=padding_index)
else:
raise TypeError(
f"Unsupported type for concatenation: got {type(tensors)}")
```
###Assistant :
Concat the `new_tensors` to `tensors` on the first dim and pad them on the second if needed. Works for tensors or
nested list/tuples of tensors.
|
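A hedged NumPy-only sketch of the pad-and-concatenate behaviour the helper above relies on (the real `numpy_pad_and_concatenate` lives elsewhere; this simplified version only handles 2-D arrays):
```Python
import numpy as np

def pad_and_concat_2d(a, b, padding_index=-100):
    # Pad both arrays on the second axis to a common width, then stack on the first.
    width = max(a.shape[1], b.shape[1])
    a_pad = np.full((a.shape[0], width), padding_index, dtype=a.dtype)
    b_pad = np.full((b.shape[0], width), padding_index, dtype=b.dtype)
    a_pad[:, : a.shape[1]] = a
    b_pad[:, : b.shape[1]] = b
    return np.concatenate([a_pad, b_pad], axis=0)

print(pad_and_concat_2d(np.ones((2, 3), dtype=int), np.zeros((1, 5), dtype=int)))
```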
1,128 | def kubernetes_manifest():
template = Template(
(prefect.__module_path__ / "cli" / "templates" / "kubernetes.yaml").read_text()
)
manifest = template.substitute(
{
"image_name": get_prefect_image_name(),
}
)
print(manifest)
|
Generates a Kubernetes manifest to deploy Orion to a cluster.
Example:
$ prefect orion kubernetes-manifest | kubectl apply -f -
| 21 | 22 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def kubernetes_manifest():
template = Template(
(prefect.__module_path__ / "cli" / "templates" / "kubernetes.yaml").read_text()
)
manifest = template.substitute(
{
"image_name": get_prefect_image_name(),
}
)
print(manifest)
```
###Assistant :
Generates a Kubernetes manifest to deploy Orion to a cluster.
Example:
$ prefect orion kubernetes-manifest | kubectl apply -f -
|
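A self-contained sketch of the `string.Template` substitution used above — the template text and image name are placeholders, not the real Prefect manifest:
```Python
from string import Template

# Illustrative two-line "manifest"; the real template is read from a YAML file.
template = Template("image: $image_name\nreplicas: 1")
manifest = template.substitute({"image_name": "prefecthq/prefect:2-latest"})
print(manifest)
```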
1,129 | def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):
# Case A: Head of trace project
self.login_as(self.user)
heart = self.create_project(
name="Heart", slug="heart", teams=[self.team], fire_project_created=True
)
mock_query.side_effect = [
{"data": [{"count()": 1000}]},
]
mock_querybuilder.side_effect = [
{
"data": [
{
"trace": "6503ee33b7bc43aead1facaa625a5dba",
"id": "6ddc83ee612b4e89b95b5278c8fd188f",
"random_number() AS random_number": 4255299100,
"is_root": 1,
},
{
"trace": "6503ee33b7bc43aead1facaa625a5dba",
"id": "0b127a578f8440c793f9ba1de595229f",
"random_number() AS random_number": 3976019453,
"is_root": 1,
},
]
},
{
"data": [
{
"project": self.project.slug,
"project_id": self.project.id,
"count": 2,
"root_count": 2,
},
{
"project": heart.slug,
"project_id": heart.id,
"count": 1,
"root_count": 0,
},
]
},
]
end_time = timezone.now()
start_time = end_time - timedelta(hours=1)
query = "environment:dev"
requested_sample_size = 2
calls = self.generate_fetch_transactions_count_query(
query, start_time, end_time, requested_sample_size
)
snuba_query_random_transactions = random_transactions_snuba_query(
query, requested_sample_size, start_time, end_time, self.project
)
snuba_query_project_stats = project_stats_snuba_query(
query,
start_time,
end_time,
self.project,
trace_ids=["6503ee33b7bc43aead1facaa625a5dba"] * 2,
)
with Feature({"organizations:server-side-sampling": True}):
response = self.client.get(
f"{self.endpoint}?sampleSize={requested_sample_size}&query={query}"
)
assert response.status_code == 200
assert mock_query.mock_calls == calls
assert len(mock_querybuilder.call_args_list) == 2
self.assert_mocked_query_calls(
snuba_query_random_transactions, snuba_query_project_stats, mock_querybuilder
)
response_data = response.json()
assert response_data["projectBreakdown"] == [
{"project_id": self.project.id, "project": self.project.slug, "count()": 2},
{"project_id": heart.id, "project": heart.slug, "count()": 1},
]
assert response_data["parentProjectBreakdown"] == [
{"project": self.project.slug, "projectId": self.project.id, "percentage": 1.0}
]
|
Case A: Requesting a project (bar) that is root but is a head of distributed traces

Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS):
|---------+-------+------|
| project | count | root |
|---------+-------+------|
| bar | 100 | 100 |
| heart | 5 | 0 |
|---------+-------+------|
| 47 | 183 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_queries_when_requested_project_is_head_of_trace(self, mock_query, mock_querybuilder):
# Case A: Head of trace project
self.login_as(self.user)
heart = self.create_project(
name="Heart", slug="heart", teams=[self.team], fire_project_created=True
)
mock_query.side_effect = [
{"data": [{"count()": 1000}]},
]
mock_querybuilder.side_effect = [
{
"data": [
{
"trace": "6503ee33b7bc43aead1facaa625a5dba",
"id": "6ddc83ee612b4e89b95b5278c8fd188f",
"random_number() AS random_number": 4255299100,
"is_root": 1,
},
{
"trace": "6503ee33b7bc43aead1facaa625a5dba",
"id": "0b127a578f8440c793f9ba1de595229f",
"random_number() AS random_number": 3976019453,
"is_root": 1,
},
]
},
{
"data": [
{
"project": self.project.slug,
"project_id": self.project.id,
"count": 2,
"root_count": 2,
},
{
"project": heart.slug,
"project_id": heart.id,
"count": 1,
"root_count": 0,
},
]
},
]
end_time = timezone.now()
start_time = end_time - timedelta(hours=1)
query = "environment:dev"
requested_sample_size = 2
calls = self.generate_fetch_transactions_count_query(
query, start_time, end_time, requested_sample_size
)
snuba_query_random_transactions = random_transactions_snuba_query(
query, requested_sample_size, start_time, end_time, self.project
)
snuba_query_project_stats = project_stats_snuba_query(
query,
start_time,
end_time,
self.project,
trace_ids=["6503ee33b7bc43aead1facaa625a5dba"] * 2,
)
with Feature({"organizations:server-side-sampling": True}):
response = self.client.get(
f"{self.endpoint}?sampleSize={requested_sample_size}&query={query}"
)
assert response.status_code == 200
assert mock_query.mock_calls == calls
assert len(mock_querybuilder.call_args_list) == 2
self.assert_mocked_query_calls(
snuba_query_random_transactions, snuba_query_project_stats, mock_querybuilder
)
response_data = response.json()
assert response_data["projectBreakdown"] == [
{"project_id": self.project.id, "project": self.project.slug, "count()": 2},
{"project_id": heart.id, "project": heart.slug, "count()": 1},
]
assert response_data["parentProjectBreakdown"] == [
{"project": self.project.slug, "projectId": self.project.id, "percentage": 1.0}
]
```
###Assistant :
Case A: Requesting a project (bar) that is root but is a head of distributed traces
Example of smart query response (DYNAMIC_SAMPLING_DISTRIBUTION_FETCH_PROJECT_STATS):
|---------+-------+------|
| project | count | root |
|---------+-------+------|
| bar | 100 | 100 |
| heart | 5 | 0 |
|---------+-------+------|
|
1,130 | def validate_leave_allocation_against_leave_application(ledger):
leave_application_records = frappe.db.sql_list(
,
(ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date),
)
if leave_application_records:
frappe.throw(
_("Leave allocation {0} is linked with the Leave Application {1}").format(
ledger.transaction_name, ", ".join(leave_application_records)
)
)
| Checks that leave allocation has no leave application against it
SELECT transaction_name
FROM `tabLeave Ledger Entry`
WHERE
employee=%s
AND leave_type=%s
AND transaction_type='Leave Application'
AND from_date>=%s
AND to_date<=%s
| 27 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def validate_leave_allocation_against_leave_application(ledger):
leave_application_records = frappe.db.sql_list(
,
(ledger.employee, ledger.leave_type, ledger.from_date, ledger.to_date),
)
if leave_application_records:
frappe.throw(
_("Leave allocation {0} is linked with the Leave Application {1}").format(
ledger.transaction_name, ", ".join(leave_application_records)
)
)
```
###Assistant : Checks that leave allocation has no leave application against it
SELECT transaction_name
FROM `tabLeave Ledger Entry`
WHERE
employee=%s
AND leave_type=%s
AND transaction_type='Leave Application'
AND from_date>=%s
AND to_date<=%s
|
1,131 | def dce_rpc_endianess(pkt):
try:
endianness = pkt.underlayer.endian
except AttributeError:
# handle the case where a PNIO class is
# built without its DCE-RPC under-layer
# i.e there is no endianness indication
return "!"
if endianness == 0: # big endian
return ">"
elif endianness == 1: # little endian
return "<"
else:
return "!"
| determine the symbol for the endianness of the DCE/RPC | 10 | 53 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def dce_rpc_endianess(pkt):
try:
endianness = pkt.underlayer.endian
except AttributeError:
# handle the case where a PNIO class is
# built without its DCE-RPC under-layer
# i.e there is no endianness indication
return "!"
if endianness == 0: # big endian
return ">"
elif endianness == 1: # little endian
return "<"
else:
return "!"
```
###Assistant : determine the symbol for the endianness of the DCE/RPC
|
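The `<`, `>`, and `!` characters returned above are `struct` format prefixes; a quick standard-library sketch of what each one changes:
```Python
import struct

value = 0x0102
print(struct.pack(">H", value))  # b'\x01\x02'  big endian
print(struct.pack("<H", value))  # b'\x02\x01'  little endian
print(struct.pack("!H", value))  # b'\x01\x02'  network order (big endian)
```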
1,132 | def _get_extra_hosts(self, docker_client) -> Dict[str, str]:
if sys.platform == "linux" and (
# Do not warn if the user has specified a host manually that does not use
# a local address
"PREFECT_API_URL" not in self.env
or re.search(
".*(localhost)|(127.0.0.1)|(host.docker.internal).*",
self.env["PREFECT_API_URL"],
)
):
user_version = packaging.version.parse(docker_client.version()["Version"])
required_version = packaging.version.parse("20.10.0")
if user_version < required_version:
warnings.warn(
"`host.docker.internal` could not be automatically resolved to your "
"local ip address. This feature is not supported on Docker Engine "
f"v{user_version}, upgrade to v{required_version}+ if you "
"encounter issues."
)
return {}
else:
# Compatibility for linux -- https://github.com/docker/cli/issues/2290
# Only supported by Docker v20.10.0+ which is our minimum recommended version
return {"host.docker.internal": "host-gateway"}
|
A host.docker.internal -> host-gateway mapping is necessary for communicating
with the API on Linux machines. Docker Desktop on macOS will automatically
already have this mapping.
| 25 | 108 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _get_extra_hosts(self, docker_client) -> Dict[str, str]:
if sys.platform == "linux" and (
# Do not warn if the user has specified a host manually that does not use
# a local address
"PREFECT_API_URL" not in self.env
or re.search(
".*(localhost)|(127.0.0.1)|(host.docker.internal).*",
self.env["PREFECT_API_URL"],
)
):
user_version = packaging.version.parse(docker_client.version()["Version"])
required_version = packaging.version.parse("20.10.0")
if user_version < required_version:
warnings.warn(
"`host.docker.internal` could not be automatically resolved to your "
"local ip address. This feature is not supported on Docker Engine "
f"v{user_version}, upgrade to v{required_version}+ if you "
"encounter issues."
)
return {}
else:
# Compatibility for linux -- https://github.com/docker/cli/issues/2290
# Only supported by Docker v20.10.0+ which is our minimum recommend version
return {"host.docker.internal": "host-gateway"}
```
###Assistant :
A host.docker.internal -> host-gateway mapping is necessary for communicating
with the API on Linux machines. Docker Desktop on macOS will automatically
already have this mapping.
|
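A hedged sketch of where such a mapping ends up: the `docker` Python SDK accepts an `extra_hosts` mapping when running a container. It assumes a local Docker daemon; the image name and command are illustrative only:
```Python
import docker

client = docker.from_env()
# The host-gateway alias lets the container reach services on the Linux host.
client.containers.run(
    "python:3.10-slim",
    command=["python", "-c", "print('hello from the container')"],
    extra_hosts={"host.docker.internal": "host-gateway"},
    remove=True,
)
```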
1,133 | def test_del_store():
with patch("salt.modules.win_certutil.get_cert_serial") as cert_serial_mock:
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"OtherStuff"
)
)
cache_mock = MagicMock(return_value="/tmp/cert.cer")
cert_serial_mock.return_value = "ABCDEF"
with patch.dict(
certutil.__salt__, {"cmd.run": cmd_mock, "cp.cache_file": cache_mock}
), patch("os.path.exists", MagicMock(return_value=True)):
certutil.del_store("salt://path/to/file", "TrustedPublisher")
cmd_mock.assert_called_once_with(
'certutil.exe -delstore TrustedPublisher "ABCDEF"'
)
cache_mock.assert_called_once_with("salt://path/to/file", "base")
|
Test removing a certificate from a specific store
| 8 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_del_store():
with patch("salt.modules.win_certutil.get_cert_serial") as cert_serial_mock:
cmd_mock = MagicMock(
return_value=(
"CertInfo\r\n"
"================ Certificate 0 ================\r\n"
"Serial Number: 180720d39cd2db3244ba037417241e90\r\n"
"OtherStuff"
)
)
cache_mock = MagicMock(return_value="/tmp/cert.cer")
cert_serial_mock.return_value = "ABCDEF"
with patch.dict(
certutil.__salt__, {"cmd.run": cmd_mock, "cp.cache_file": cache_mock}
), patch("os.path.exists", MagicMock(return_value=True)):
certutil.del_store("salt://path/to/file", "TrustedPublisher")
cmd_mock.assert_called_once_with(
'certutil.exe -delstore TrustedPublisher "ABCDEF"'
)
cache_mock.assert_called_once_with("salt://path/to/file", "base")
```
###Assistant :
Test removing a certificate from a specific store
|
1,134 | def _select_backend(config):
backend_arg = config.getoption('--qute-backend')
backend_env = os.environ.get('QUTE_TESTS_BACKEND')
backend = backend_arg or backend_env or _auto_select_backend()
# Fail early if selected backend is not available
# pylint: disable=unused-import
if backend == 'webkit':
import PyQt5.QtWebKitWidgets
elif backend == 'webengine':
import PyQt5.QtWebEngineWidgets
else:
raise utils.Unreachable(backend)
return backend
| Select the backend for running tests.
The backend is auto-selected in the following manner:
1. Use QtWebKit if available
2. Otherwise use QtWebEngine as a fallback
Auto-selection is overridden by either passing a backend via
`--qute-backend=<backend>` or setting the environment variable
`QUTE_TESTS_BACKEND=<backend>`.
Args:
config: pytest config
Raises:
ImportError if the selected backend is not available.
Returns:
The selected backend as a string (e.g. 'webkit').
| 64 | 44 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _select_backend(config):
backend_arg = config.getoption('--qute-backend')
backend_env = os.environ.get('QUTE_TESTS_BACKEND')
backend = backend_arg or backend_env or _auto_select_backend()
# Fail early if selected backend is not available
# pylint: disable=unused-import
if backend == 'webkit':
import PyQt5.QtWebKitWidgets
elif backend == 'webengine':
import PyQt5.QtWebEngineWidgets
else:
raise utils.Unreachable(backend)
return backend
```
###Assistant : Select the backend for running tests.
The backend is auto-selected in the following manner:
1. Use QtWebKit if available
2. Otherwise use QtWebEngine as a fallback
Auto-selection is overridden by either passing a backend via
`--qute-backend=<backend>` or setting the environment variable
`QUTE_TESTS_BACKEND=<backend>`.
Args:
config: pytest config
Raises:
ImportError if the selected backend is not available.
Returns:
The selected backend as a string (e.g. 'webkit').
|
1,135 | def equals(self, other):
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
|
Returns True if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
| 58 | 28 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def equals(self, other):
from sympy.logic.inference import satisfiable
from sympy.core.relational import Relational
if self.has(Relational) or other.has(Relational):
raise NotImplementedError('handling of relationals')
return self.atoms() == other.atoms() and \
not satisfiable(Not(Equivalent(self, other)))
```
###Assistant :
Returns True if the given formulas have the same truth table.
For two formulas to be equal they must have the same literals.
Examples
========
>>> from sympy.abc import A, B, C
>>> from sympy import And, Or, Not
>>> (A >> B).equals(~B >> ~A)
True
>>> Not(And(A, B, C)).equals(And(Not(A), Not(B), Not(C)))
False
>>> Not(And(A, Not(A))).equals(Or(B, Not(B)))
False
|
1,136 | def get_latest_stock_qty(item_code, warehouse=None):
values, condition = [item_code], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where wh.name = tabBin.warehouse\
and wh.lft >= %s and wh.rgt <= %s)"
else:
values.append(warehouse)
condition += " AND warehouse = %s"
actual_qty = frappe.db.sql(
.format(
condition
),
values,
)[0][0]
return actual_qty
| select sum(actual_qty) from tabBin
where item_code=%s {0} | 7 | 64 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_latest_stock_qty(item_code, warehouse=None):
values, condition = [item_code], ""
if warehouse:
lft, rgt, is_group = frappe.db.get_value("Warehouse", warehouse, ["lft", "rgt", "is_group"])
if is_group:
values.extend([lft, rgt])
condition += "and exists (\
select name from `tabWarehouse` wh where wh.name = tabBin.warehouse\
and wh.lft >= %s and wh.rgt <= %s)"
else:
values.append(warehouse)
condition += " AND warehouse = %s"
actual_qty = frappe.db.sql(
.format(
condition
),
values,
)[0][0]
return actual_qty
```
###Assistant : select sum(actual_qty) from tabBin
where item_code=%s {0}
|
1,137 | def get_feature_names_out(self, input_features=None):
input_features = _check_feature_names_in(
self, input_features, generate_names=True
)
est_name = self.__class__.__name__.lower()
names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
for j in range(1, self.sample_steps):
cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
names_list.extend(cos_names + sin_names)
return np.asarray(names_list, dtype=object)
| Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
| 39 | 45 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_feature_names_out(self, input_features=None):
input_features = _check_feature_names_in(
self, input_features, generate_names=True
)
est_name = self.__class__.__name__.lower()
names_list = [f"{est_name}_{name}_sqrt" for name in input_features]
for j in range(1, self.sample_steps):
cos_names = [f"{est_name}_{name}_cos{j}" for name in input_features]
sin_names = [f"{est_name}_{name}_sin{j}" for name in input_features]
names_list.extend(cos_names + sin_names)
return np.asarray(names_list, dtype=object)
```
###Assistant : Get output feature names for transformation.
Parameters
----------
input_features : array-like of str or None, default=None
Only used to validate feature names with the names seen in :meth:`fit`.
Returns
-------
feature_names_out : ndarray of str objects
Transformed feature names.
|
1,138 | def test_render_valid_image_as_context_variable(self):
context = {"image": self.image, "image_node": "fake value"}
node = ImageNode(Variable("image"), "original", "image_node")
rendered = node.render(context)
self.assertEqual(rendered, "")
self.assertIsInstance(context["image_node"], Rendition)
|
Tests that an ImageNode with a valid image and a context variable name
renders an empty string and puts a rendition in the context variable
| 25 | 21 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_render_valid_image_as_context_variable(self):
context = {"image": self.image, "image_node": "fake value"}
node = ImageNode(Variable("image"), "original", "image_node")
rendered = node.render(context)
self.assertEqual(rendered, "")
self.assertIsInstance(context["image_node"], Rendition)
```
###Assistant :
Tests that an ImageNode with a valid image and a context variable name
renders an empty string and puts a rendition in the context variable
|
1,139 | def dup_cauchy_lower_bound(f, K):
g = dup_reverse(f)
if len(g) < 2:
raise PolynomialError('Polynomial has no non-zero roots.')
if K.is_ZZ:
K = K.get_field()
b = dup_cauchy_upper_bound(g, K)
return K.one / b
| Compute the Cauchy lower bound on the absolute value of all non-zero
roots of f, real or complex. | 18 | 29 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def dup_cauchy_lower_bound(f, K):
g = dup_reverse(f)
if len(g) < 2:
raise PolynomialError('Polynomial has no non-zero roots.')
if K.is_ZZ:
K = K.get_field()
b = dup_cauchy_upper_bound(g, K)
return K.one / b
```
###Assistant : Compute the Cauchy lower bound on the absolute value of all non-zero
roots of f, real or complex.
|
1,140 | def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):
from pyarrow.parquet import ParquetFile
from modin.core.storage_formats.pandas.parsers import ParquetFileToRead
# If we don't have any columns to read, we should just return an empty
# set of references.
if len(col_partitions) == 0:
return []
filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options)
row_groups_per_file = []
num_row_groups = 0
# Count up the total number of row groups across all files and
# keep track of row groups per file to use later.
for file in parquet_files:
with filesystem.open(file) as f:
row_groups = ParquetFile(f).num_row_groups
row_groups_per_file.append(row_groups)
num_row_groups += row_groups
# step determines how many row groups are going to be in a partition
step = compute_chunksize(
num_row_groups,
NPartitions.get(),
min_block_size=1,
)
current_partition_size = 0
file_index = 0
partition_files = [] # 2D array - each element contains list of chunks to read
row_groups_used_in_current_file = 0
total_row_groups_added = 0
# On each iteration, we add a chunk of one file. That will
# take us either to the end of a partition, or to the end
# of a file.
while total_row_groups_added < num_row_groups:
if current_partition_size == 0:
partition_files.append([])
partition_file = partition_files[-1]
file_path = parquet_files[file_index]
row_group_start = row_groups_used_in_current_file
row_groups_left_in_file = (
row_groups_per_file[file_index] - row_groups_used_in_current_file
)
row_groups_left_for_this_partition = step - current_partition_size
if row_groups_left_for_this_partition <= row_groups_left_in_file:
# File has at least what we need to finish partition
# So finish this partition and start a new one.
num_row_groups_to_add = row_groups_left_for_this_partition
current_partition_size = 0
else:
# File doesn't have enough to complete this partition. Add
# it into current partition and go to next file.
num_row_groups_to_add = row_groups_left_in_file
current_partition_size += num_row_groups_to_add
if num_row_groups_to_add == row_groups_left_in_file:
file_index += 1
row_groups_used_in_current_file = 0
else:
row_groups_used_in_current_file += num_row_groups_to_add
partition_file.append(
ParquetFileToRead(
file_path, row_group_start, row_group_start + num_row_groups_to_add
)
)
total_row_groups_added += num_row_groups_to_add
assert (
total_row_groups_added == num_row_groups
), "row groups added does not match total num of row groups across parquet files"
all_partitions = []
for files_to_read in partition_files:
all_partitions.append(
[
cls.deploy(
cls.parse,
files_for_parser=files_to_read,
columns=cols,
num_returns=3,
storage_options=storage_options,
**kwargs,
)
for cols in col_partitions
]
)
return all_partitions
|
Deploy remote tasks to the workers with passed parameters.
Parameters
----------
fname : str, path object or file-like object
Name of the file to read.
col_partitions : list
List of arrays with columns names that should be read
by each partition.
storage_options : dict
Parameters for specific storage engine.
**kwargs : dict
Parameters of deploying read_* function.
Returns
-------
List
Array with references to the task deploy result for each partition.
| 71 | 327 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def call_deploy(cls, fname, col_partitions, storage_options, **kwargs):
from pyarrow.parquet import ParquetFile
from modin.core.storage_formats.pandas.parsers import ParquetFileToRead
# If we don't have any columns to read, we should just return an empty
# set of references.
if len(col_partitions) == 0:
return []
filesystem, parquet_files = cls.get_fsspec_files(fname, storage_options)
row_groups_per_file = []
num_row_groups = 0
# Count up the total number of row groups across all files and
# keep track of row groups per file to use later.
for file in parquet_files:
with filesystem.open(file) as f:
row_groups = ParquetFile(f).num_row_groups
row_groups_per_file.append(row_groups)
num_row_groups += row_groups
# step determines how many row groups are going to be in a partition
step = compute_chunksize(
num_row_groups,
NPartitions.get(),
min_block_size=1,
)
current_partition_size = 0
file_index = 0
partition_files = [] # 2D array - each element contains list of chunks to read
row_groups_used_in_current_file = 0
total_row_groups_added = 0
# On each iteration, we add a chunk of one file. That will
# take us either to the end of a partition, or to the end
# of a file.
while total_row_groups_added < num_row_groups:
if current_partition_size == 0:
partition_files.append([])
partition_file = partition_files[-1]
file_path = parquet_files[file_index]
row_group_start = row_groups_used_in_current_file
row_groups_left_in_file = (
row_groups_per_file[file_index] - row_groups_used_in_current_file
)
row_groups_left_for_this_partition = step - current_partition_size
if row_groups_left_for_this_partition <= row_groups_left_in_file:
# File has at least what we need to finish partition
# So finish this partition and start a new one.
num_row_groups_to_add = row_groups_left_for_this_partition
current_partition_size = 0
else:
# File doesn't have enough to complete this partition. Add
# it into current partition and go to next file.
num_row_groups_to_add = row_groups_left_in_file
current_partition_size += num_row_groups_to_add
if num_row_groups_to_add == row_groups_left_in_file:
file_index += 1
row_groups_used_in_current_file = 0
else:
row_groups_used_in_current_file += num_row_groups_to_add
partition_file.append(
ParquetFileToRead(
file_path, row_group_start, row_group_start + num_row_groups_to_add
)
)
total_row_groups_added += num_row_groups_to_add
assert (
total_row_groups_added == num_row_groups
), "row groups added does not match total num of row groups across parquet files"
all_partitions = []
for files_to_read in partition_files:
all_partitions.append(
[
cls.deploy(
cls.parse,
files_for_parser=files_to_read,
columns=cols,
num_returns=3,
storage_options=storage_options,
**kwargs,
)
for cols in col_partitions
]
)
return all_partitions
```
###Assistant :
Deploy remote tasks to the workers with passed parameters.
Parameters
----------
fname : str, path object or file-like object
Name of the file to read.
col_partitions : list
List of arrays with columns names that should be read
by each partition.
storage_options : dict
Parameters for specific storage engine.
**kwargs : dict
Parameters of deploying read_* function.
Returns
-------
List
Array with references to the task deploy result for each partition.
|
1,141 | def get_connection(self):
if self.lib == _PSYCOPG_LIB_NAME:
import psycopg2
return psycopg2.connect(*self.args, **self.kwargs)
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
return create_engine(*self.args, **self.kwargs).connect()
raise UnsupportedDatabaseException("Unsupported database library")
|
Make the database connection and get it.
For psycopg2, pass all arguments to psycopg2.connect() and return the
result of psycopg2.connect(). For sqlalchemy, pass all arguments to
sqlalchemy.create_engine() and return the result of calling connect()
on the engine.
Returns
-------
Any
The open database connection.
| 44 | 26 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_connection(self):
if self.lib == _PSYCOPG_LIB_NAME:
import psycopg2
return psycopg2.connect(*self.args, **self.kwargs)
if self.lib == _SQLALCHEMY_LIB_NAME:
from sqlalchemy import create_engine
return create_engine(*self.args, **self.kwargs).connect()
raise UnsupportedDatabaseException("Unsupported database library")
```
###Assistant :
Make the database connection and get it.
For psycopg2, pass all arguments to psycopg2.connect() and return the
result of psycopg2.connect(). For sqlalchemy, pass all arguments to
sqlalchemy.create_engine() and return the result of calling connect()
on the engine.
Returns
-------
Any
The open database connection.
|
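A minimal sketch of the two connection paths described above, with an in-memory SQLite URL standing in for a real database; the Postgres DSN is illustrative and not checked:
```Python
from sqlalchemy import create_engine, text

# SQLAlchemy path: create_engine(...).connect()
engine = create_engine("sqlite:///:memory:")
with engine.connect() as conn:
    print(conn.execute(text("SELECT 1")).scalar())

# psycopg2 path (requires a running server, shown only for shape):
# import psycopg2
# conn = psycopg2.connect("dbname=mydb user=me password=secret host=localhost")
```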
1,142 | def argmax(x, axis=-1):
return tf.argmax(x, axis)
@keras_export("keras.backend.argmin")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs | Returns the index of the maximum value along an axis.
Args:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
| 26 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def argmax(x, axis=-1):
return tf.argmax(x, axis)
@keras_export("keras.backend.argmin")
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
```
###Assistant : Returns the index of the maximum value along an axis.
Args:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
|
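A tiny usage sketch for the backend wrapper above (TensorFlow required; the values are arbitrary):
```Python
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[1.0, 5.0, 2.0],
                 [7.0, 0.0, 3.0]])
print(K.argmax(x, axis=-1).numpy())  # [1 0]
```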
1,143 | def print_rules(self) -> Iterator[str]:
yield from self._defined_facts_lines()
yield ''
yield ''
yield from self._full_implications_lines()
yield ''
yield ''
yield from self._prereq_lines()
yield ''
yield ''
yield from self._beta_rules_lines()
yield ''
yield ''
yield "generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"
yield " 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"
yield ''
yield ''
| Returns a generator with lines to represent the facts and rules | 11 | 51 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def print_rules(self) -> Iterator[str]:
yield from self._defined_facts_lines()
yield ''
yield ''
yield from self._full_implications_lines()
yield ''
yield ''
yield from self._prereq_lines()
yield ''
yield ''
yield from self._beta_rules_lines()
yield ''
yield ''
yield "generated_assumptions = {'defined_facts': defined_facts, 'full_implications': full_implications,"
yield " 'prereq': prereq, 'beta_rules': beta_rules, 'beta_triggers': beta_triggers}"
yield ''
yield ''
```
###Assistant : Returns a generator with lines to represent the facts and rules
|
1,144 | def ensure_string(self, option, default=None):
self._ensure_stringlike(option, "string", default)
| Ensure that 'option' is a string; if not defined, set it to
'default'.
| 13 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def ensure_string(self, option, default=None):
self._ensure_stringlike(option, "string", default)
```
###Assistant : Ensure that 'option' is a string; if not defined, set it to
'default'.
|
1,145 | def update_ema(target_params, source_params, rate=0.99):
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
|
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
| 38 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def update_ema(target_params, source_params, rate=0.99):
for targ, src in zip(target_params, source_params):
targ.detach().mul_(rate).add_(src, alpha=1 - rate)
```
###Assistant :
Update target parameters to be closer to those of source parameters using
an exponential moving average.
:param target_params: the target parameter sequence.
:param source_params: the source parameter sequence.
:param rate: the EMA rate (closer to 1 means slower).
|
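A hedged PyTorch sketch of the exponential-moving-average update above, applied to two tiny parameter lists rather than real model parameters:
```Python
import torch

target = [torch.nn.Parameter(torch.zeros(3))]
source = [torch.nn.Parameter(torch.ones(3))]

rate = 0.99
with torch.no_grad():
    for targ, src in zip(target, source):
        # targ <- rate * targ + (1 - rate) * src
        targ.mul_(rate).add_(src, alpha=1 - rate)

print(target[0])  # each entry moved 1% of the way toward the source value
```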
1,146 | def _android_folder() -> str | None:
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
# if fails find an android folder looking path on the sys.path
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1) | :return: base folder for the Android OS or None if cannot be found | 13 | 68 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _android_folder() -> str | None:
try:
# First try to get path to android app via pyjnius
from jnius import autoclass
Context = autoclass("android.content.Context") # noqa: N806
result: str | None = Context.getFilesDir().getParentFile().getAbsolutePath()
except Exception:
# if fails find an android folder looking path on the sys.path
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
```
###Assistant : :return: base folder for the Android OS or None if cannot be found
|
1,147 | def de_bruijn(charset, n, maxlen):
# type: (str, int, int) -> str
k = len(charset)
a = [0] * k * n
sequence = [] # type: List[str]
|
Generate the De Bruijn Sequence up to `maxlen` characters
for the charset `charset` and subsequences of length `n`.
Algorithm modified from wikipedia
https://en.wikipedia.org/wiki/De_Bruijn_sequence
| 23 | 27 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def de_bruijn(charset, n, maxlen):
# type: (str, int, int) -> str
k = len(charset)
a = [0] * k * n
sequence = [] # type: List[str]
```
###Assistant :
Generate the De Bruijn Sequence up to `maxlen` characters
for the charset `charset` and subsequences of length `n`.
Algorithm modified from wikipedia
https://en.wikipedia.org/wiki/De_Bruijn_sequence
|
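The code column for this row is cut off in the dump; a complete, standard-library sketch of the classic recursive construction (same Wikipedia algorithm, with illustrative parameter values):
```Python
def de_bruijn_sketch(charset, n, maxlen):
    """Hedged re-implementation: first `maxlen` characters of the B(k, n) cycle."""
    k = len(charset)
    a = [0] * k * n
    sequence = []

    def db(t, p):
        if len(sequence) >= maxlen:
            return  # enough output already; stop recursing early
        if t > n:
            if n % p == 0:
                sequence.extend(a[1:p + 1])
        else:
            a[t] = a[t - p]
            db(t + 1, p)
            for j in range(a[t - p] + 1, k):
                a[t] = j
                db(t + 1, t)

    db(1, 1)
    return "".join(charset[i] for i in sequence)[:maxlen]

print(de_bruijn_sketch("abc", 3, 20))  # first 20 characters of the B(3, 3) cycle
```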
1,148 | def _add_option_refresh(self) -> None:
logger.debug("Adding refresh option")
btnrefresh = ttk.Button(self.optsframe,
image=get_images().icons["reload"],
command=lambda x="update": preview_trigger().set(x)) # type:ignore
btnrefresh.pack(padx=2, side=tk.RIGHT)
Tooltip(btnrefresh,
text=_("Preview updates at every model save. Click to refresh now."),
wrap_length=200)
logger.debug("Added refresh option")
| Add refresh button to refresh preview immediately | 7 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _add_option_refresh(self) -> None:
logger.debug("Adding refresh option")
btnrefresh = ttk.Button(self.optsframe,
image=get_images().icons["reload"],
command=lambda x="update": preview_trigger().set(x)) # type:ignore
btnrefresh.pack(padx=2, side=tk.RIGHT)
Tooltip(btnrefresh,
text=_("Preview updates at every model save. Click to refresh now."),
wrap_length=200)
logger.debug("Added refresh option")
```
###Assistant : Add refresh button to refresh preview immediately
|
1,149 | def close(self):
try:
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, "map", None):
self.map = None
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = DeferredError(ValueError("Operation on closed image"))
|
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is required to close images that have multiple frames or
have not had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
| 53 | 60 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def close(self):
try:
if hasattr(self, "_close__fp"):
self._close__fp()
if self.fp:
self.fp.close()
self.fp = None
except Exception as msg:
logger.debug("Error closing: %s", msg)
if getattr(self, "map", None):
self.map = None
# Instead of simply setting to None, we're setting up a
# deferred error that will better explain that the core image
# object is gone.
self.im = DeferredError(ValueError("Operation on closed image"))
```
###Assistant :
Closes the file pointer, if possible.
This operation will destroy the image core and release its memory.
The image data will be unusable afterward.
This function is required to close images that have multiple frames or
have not had their file read and closed by the
:py:meth:`~PIL.Image.Image.load` method. See :ref:`file-handling` for
more information.
|
1,150 | def diop_general_sum_of_squares(eq, limit=1):
r
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfSquares.name:
return set(GeneralSumOfSquares(eq).solve(limit=limit))
|
Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Returns at most ``limit`` number of solutions.
Usage
=====
``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Details
=======
When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
no solutions. Refer to [1]_ for more details.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
{(15, 22, 22, 24, 24)}
Reference
=========
.. [1] Representing an integer as a sum of three squares, [online],
Available:
http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
| 138 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def diop_general_sum_of_squares(eq, limit=1):
r
var, coeff, diop_type = classify_diop(eq, _dict=False)
if diop_type == GeneralSumOfSquares.name:
return set(GeneralSumOfSquares(eq).solve(limit=limit))
```
###Assistant :
Solves the equation `x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Returns at most ``limit`` number of solutions.
Usage
=====
``general_sum_of_squares(eq, limit)`` : Here ``eq`` is an expression which
is assumed to be zero. Also, ``eq`` should be in the form,
`x_{1}^2 + x_{2}^2 + . . . + x_{n}^2 - k = 0`.
Details
=======
When `n = 3` if `k = 4^a(8m + 7)` for some `a, m \in Z` then there will be
no solutions. Refer to [1]_ for more details.
Examples
========
>>> from sympy.solvers.diophantine.diophantine import diop_general_sum_of_squares
>>> from sympy.abc import a, b, c, d, e
>>> diop_general_sum_of_squares(a**2 + b**2 + c**2 + d**2 + e**2 - 2345)
{(15, 22, 22, 24, 24)}
Reference
=========
.. [1] Representing an integer as a sum of three squares, [online],
Available:
http://www.proofwiki.org/wiki/Integer_as_Sum_of_Three_Squares
|
1,151 | def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:
warnings.warn(
"acme.messages.Authorization.resolved_combinations is deprecated and will be "
"removed in a future release.", DeprecationWarning)
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations) # pylint: disable=not-an-iterable
@Directory.register | Combinations with challenges instead of indices.
.. deprecated: 1.30.0
| 9 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def resolved_combinations(self) -> Tuple[Tuple[ChallengeBody, ...], ...]:
warnings.warn(
"acme.messages.Authorization.resolved_combinations is deprecated and will be "
"removed in a future release.", DeprecationWarning)
return tuple(tuple(self.challenges[idx] for idx in combo)
for combo in self.combinations) # pylint: disable=not-an-iterable
@Directory.register
```
###Assistant : Combinations with challenges instead of indices.
.. deprecated: 1.30.0
|
1,152 | def test_tweedie_log_identity_consistency(p):
half_tweedie_log = HalfTweedieLoss(power=p)
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
n_samples = 10
y_true, raw_prediction = random_y_true_raw_prediction(
loss=half_tweedie_log, n_samples=n_samples, seed=42
)
y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
# Let's compare the loss values, up to some constant term that is dropped
# in HalfTweedieLoss but not in HalfTweedieLossIdentity.
loss_log = half_tweedie_log.loss(
y_true=y_true, raw_prediction=raw_prediction
) + half_tweedie_log.constant_to_optimal_zero(y_true)
loss_identity = half_tweedie_identity.loss(
y_true=y_true, raw_prediction=y_pred
) + half_tweedie_identity.constant_to_optimal_zero(y_true)
# Note that HalfTweedieLoss ignores different constant terms than
# HalfTweedieLossIdentity. Constant terms means terms not depending on
# raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
# give the same values.
assert_allclose(loss_log, loss_identity)
# For gradients and hessians, the constant terms do not matter. We have, however,
# to account for the chain rule, i.e. with x=raw_prediction
# gradient_log(x) = d/dx loss_log(x)
# = d/dx loss_identity(exp(x))
# = exp(x) * gradient_identity(exp(x))
# Similarly,
# hessian_log(x) = exp(x) * gradient_identity(exp(x))
# + exp(x)**2 * hessian_identity(x)
gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
y_true=y_true, raw_prediction=raw_prediction
)
gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
y_true=y_true, raw_prediction=y_pred
)
assert_allclose(gradient_log, y_pred * gradient_identity)
assert_allclose(
hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
)
| Test for identical losses when only the link function is different. | 11 | 174 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_tweedie_log_identity_consistency(p):
half_tweedie_log = HalfTweedieLoss(power=p)
half_tweedie_identity = HalfTweedieLossIdentity(power=p)
n_samples = 10
y_true, raw_prediction = random_y_true_raw_prediction(
loss=half_tweedie_log, n_samples=n_samples, seed=42
)
y_pred = half_tweedie_log.link.inverse(raw_prediction) # exp(raw_prediction)
# Let's compare the loss values, up to some constant term that is dropped
# in HalfTweedieLoss but not in HalfTweedieLossIdentity.
loss_log = half_tweedie_log.loss(
y_true=y_true, raw_prediction=raw_prediction
) + half_tweedie_log.constant_to_optimal_zero(y_true)
loss_identity = half_tweedie_identity.loss(
y_true=y_true, raw_prediction=y_pred
) + half_tweedie_identity.constant_to_optimal_zero(y_true)
# Note that HalfTweedieLoss ignores different constant terms than
# HalfTweedieLossIdentity. Constant terms means terms not depending on
# raw_prediction. By adding these terms, `constant_to_optimal_zero`, both losses
# give the same values.
assert_allclose(loss_log, loss_identity)
# For gradients and hessians, the constant terms do not matter. We have, however,
# to account for the chain rule, i.e. with x=raw_prediction
# gradient_log(x) = d/dx loss_log(x)
# = d/dx loss_identity(exp(x))
# = exp(x) * gradient_identity(exp(x))
# Similarly,
# hessian_log(x) = exp(x) * gradient_identity(exp(x))
# + exp(x)**2 * hessian_identity(x)
gradient_log, hessian_log = half_tweedie_log.gradient_hessian(
y_true=y_true, raw_prediction=raw_prediction
)
gradient_identity, hessian_identity = half_tweedie_identity.gradient_hessian(
y_true=y_true, raw_prediction=y_pred
)
assert_allclose(gradient_log, y_pred * gradient_identity)
assert_allclose(
hessian_log, y_pred * gradient_identity + y_pred**2 * hessian_identity
)
```
###Assistant : Test for identical losses when only the link function is different.
|
1,153 | def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
SymPyDeprecationWarning(
feature="laplace_transform of a Matrix with noconds=False (default)",
useinstead="the option legacy_matrix=False to get the new behaviour",
issue=21504,
deprecated_since_version="1.9"
).warn()
return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))
else:
elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]
if conds:
elements, avals, conditions = zip(*elements_trans)
f_laplace = type(f)(*f.shape, elements)
return f_laplace, Max(*avals), And(*conditions)
else:
return type(f)(*f.shape, elements_trans)
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True) |
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t.
Explanation
===========
For all sensible functions, this converges absolutely in a
half-plane
.. math :: a < \operatorname{Re}(s)
This function returns ``(F, a, cond)`` where ``F`` is the Laplace
transform of ``f``, `a` is the half-plane of convergence, and `cond` are
auxiliary convergence conditions.
The implementation is rule-based, and if you are interested in which
rules are applied, and whether integration is attempted, you can switch
debug information on by setting ``sympy.SYMPY_DEBUG=True``.
The lower bound is `0-`, meaning that this bound should be approached
from the lower side. This is only necessary if distributions are involved.
At present, it is only done if `f(t)` contains ``DiracDelta``, in which
case the Laplace transform is computed implicitly as
.. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t
by applying rules.
If the integral cannot be fully computed in closed form, this function
returns an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
.. deprecated:: 1.9
Legacy behavior for matrices where ``laplace_transform`` with
``noconds=False`` (the default) returns a Matrix whose elements are
tuples. The behavior of ``laplace_transform`` for matrices will change
in a future release of SymPy to return a tuple of the transformed
Matrix and the convergence conditions for the matrix as a whole. Use
``legacy_matrix=False`` to enable the new behavior.
Examples
========
>>> from sympy import DiracDelta, exp, laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**4, t, s)
(24/s**5, 0, True)
>>> laplace_transform(t**a, t, s)
(gamma(a + 1)/(s*s**a), 0, re(a) > -1)
>>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s)
(s/(a + s), Max(0, -a), True)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
| 300 | 89 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def laplace_transform(f, t, s, legacy_matrix=True, **hints):
r
debug('\n***** laplace_transform(%s, %s, %s)'%(f, t, s))
if isinstance(f, MatrixBase) and hasattr(f, 'applyfunc'):
conds = not hints.get('noconds', False)
if conds and legacy_matrix:
SymPyDeprecationWarning(
feature="laplace_transform of a Matrix with noconds=False (default)",
useinstead="the option legacy_matrix=False to get the new behaviour",
issue=21504,
deprecated_since_version="1.9"
).warn()
return f.applyfunc(lambda fij: laplace_transform(fij, t, s, **hints))
else:
elements_trans = [laplace_transform(fij, t, s, **hints) for fij in f]
if conds:
elements, avals, conditions = zip(*elements_trans)
f_laplace = type(f)(*f.shape, elements)
return f_laplace, Max(*avals), And(*conditions)
else:
return type(f)(*f.shape, elements_trans)
return LaplaceTransform(f, t, s).doit(**hints)
@_noconds_(True)
```
###Assistant :
Compute the Laplace Transform `F(s)` of `f(t)`,
.. math :: F(s) = \int_{0^{-}}^\infty e^{-st} f(t) \mathrm{d}t.
Explanation
===========
For all sensible functions, this converges absolutely in a
half-plane
.. math :: a < \operatorname{Re}(s)
This function returns ``(F, a, cond)`` where ``F`` is the Laplace
transform of ``f``, `a` is the half-plane of convergence, and `cond` are
auxiliary convergence conditions.
The implementation is rule-based, and if you are interested in which
rules are applied, and whether integration is attemped, you can switch
debug information on by setting ``sympy.SYMPY_DEBUG=True``.
The lower bound is `0-`, meaning that this bound should be approached
from the lower side. This is only necessary if distributions are involved.
At present, it is only done if `f(t)` contains ``DiracDelta``, in which
case the Laplace transform is computed implicitly as
.. math :: F(s) = \lim_{\tau\to 0^{-}} \int_{\tau}^\infty e^{-st} f(t) \mathrm{d}t
by applying rules.
If the integral cannot be fully computed in closed form, this function
returns an unevaluated :class:`LaplaceTransform` object.
For a description of possible hints, refer to the docstring of
:func:`sympy.integrals.transforms.IntegralTransform.doit`. If ``noconds=True``,
only `F` will be returned (i.e. not ``cond``, and also not the plane ``a``).
.. deprecated:: 1.9
Legacy behavior for matrices where ``laplace_transform`` with
``noconds=False`` (the default) returns a Matrix whose elements are
tuples. The behavior of ``laplace_transform`` for matrices will change
in a future release of SymPy to return a tuple of the transformed
Matrix and the convergence conditions for the matrix as a whole. Use
``legacy_matrix=False`` to enable the new behavior.
Examples
========
>>> from sympy import DiracDelta, exp, laplace_transform
>>> from sympy.abc import t, s, a
>>> laplace_transform(t**4, t, s)
(24/s**5, 0, True)
>>> laplace_transform(t**a, t, s)
(gamma(a + 1)/(s*s**a), 0, re(a) > -1)
>>> laplace_transform(DiracDelta(t)-a*exp(-a*t),t,s)
(s/(a + s), Max(0, -a), True)
See Also
========
inverse_laplace_transform, mellin_transform, fourier_transform
hankel_transform, inverse_hankel_transform
|
1,154 | def reduce_annotations(anno_types, answers):
for at in set(anno_types):
assert at in ("no_answer", "short_answer")
if anno_types.count("short_answer") >= anno_types.count("no_answer"):
majority = "short_answer"
is_impossible = False
else:
majority = "no_answer"
is_impossible = True
answers = [a for at, a in zip(anno_types, answers) if at == majority]
reduction = len(anno_types) - len(answers)
assert reduction < 3
if not is_impossible:
global n_no_ans
n_no_ans += reduction
else:
global n_short
n_short += reduction
answers = []
return answers, is_impossible
|
In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers,
depending on which is more numerous, with a bias towards picking short_answers.
Note: By this stage, all long_answer annotations and all samples with yes/no answer have been removed.
This leaves just no_answer and short_answers | 52 | 71 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def reduce_annotations(anno_types, answers):
for at in set(anno_types):
assert at in ("no_answer", "short_answer")
if anno_types.count("short_answer") >= anno_types.count("no_answer"):
majority = "short_answer"
is_impossible = False
else:
majority = "no_answer"
is_impossible = True
answers = [a for at, a in zip(anno_types, answers) if at == majority]
reduction = len(anno_types) - len(answers)
assert reduction < 3
if not is_impossible:
global n_no_ans
n_no_ans += reduction
else:
global n_short
n_short += reduction
answers = []
return answers, is_impossible
```
###Assistant :
In cases where there is annotator disagreement, this fn picks either only the short_answers or only the no_answers,
depending on which is more numerous, with a bias towards picking short_answers.
Note: By this stage, all long_answer annotations and all samples with yes/no answer have been removed.
This leaves just no_answer and short_answers
|
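A small, dependency-free sketch of the majority vote described above, without the global counters (the annotation labels and answers are made up):
```Python
anno_types = ["short_answer", "no_answer", "short_answer"]
answers = [{"text": "42"}, {}, {"text": "forty-two"}]

# short_answer wins ties, mirroring the bias described in the docstring
if anno_types.count("short_answer") >= anno_types.count("no_answer"):
    majority, is_impossible = "short_answer", False
else:
    majority, is_impossible = "no_answer", True

kept = [a for at, a in zip(anno_types, answers) if at == majority]
print(kept, is_impossible)  # [{'text': '42'}, {'text': 'forty-two'}] False
```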
1,155 | async def async_open_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_OPEN],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = STATE_OPEN
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config[CONF_POSITION_OPEN], COVER_PAYLOAD
)
self.async_write_ha_state()
| Move the cover up.
This method is a coroutine.
| 9 | 35 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def async_open_cover(self, **kwargs):
await mqtt.async_publish(
self.hass,
self._config.get(CONF_COMMAND_TOPIC),
self._config[CONF_PAYLOAD_OPEN],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
self._config[CONF_ENCODING],
)
if self._optimistic:
# Optimistically assume that cover has changed state.
self._state = STATE_OPEN
if self._config.get(CONF_GET_POSITION_TOPIC):
self._position = self.find_percentage_in_range(
self._config[CONF_POSITION_OPEN], COVER_PAYLOAD
)
self.async_write_ha_state()
```
###Assistant : Move the cover up.
This method is a coroutine.
|
1,156 | def _parse_distro_release_content(line):
# type: (str) -> Dict[str, str]
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info["name"] = matches.group(3)[::-1]
if matches.group(2):
distro_info["version_id"] = matches.group(2)[::-1]
if matches.group(1):
distro_info["codename"] = matches.group(1)[::-1]
elif line:
distro_info["name"] = line.strip()
return distro_info
_distro = LinuxDistribution()
|
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
| 35 | 43 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _parse_distro_release_content(line):
# type: (str) -> Dict[str, str]
matches = _DISTRO_RELEASE_CONTENT_REVERSED_PATTERN.match(line.strip()[::-1])
distro_info = {}
if matches:
# regexp ensures non-None
distro_info["name"] = matches.group(3)[::-1]
if matches.group(2):
distro_info["version_id"] = matches.group(2)[::-1]
if matches.group(1):
distro_info["codename"] = matches.group(1)[::-1]
elif line:
distro_info["name"] = line.strip()
return distro_info
_distro = LinuxDistribution()
```
###Assistant :
Parse a line from a distro release file.
Parameters:
* line: Line from the distro release file. Must be a unicode string
or a UTF-8 encoded byte string.
Returns:
A dictionary containing all information items.
|
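A rough sketch of the reverse-and-match idea behind this parser; the regex below is a simplified stand-in, not the actual `_DISTRO_RELEASE_CONTENT_REVERSED_PATTERN`, and the release line is made up.

```Python
import re

# Simplified reversed pattern: codename in parentheses, then version, then "release", then name.
REVERSED = re.compile(r"\)(?P<codename>[^(]*)\( +(?P<version>[\d.]+) +esaeler +(?P<name>.*)")

line = "Fedora release 32 (Thirty Two)"
m = REVERSED.match(line.strip()[::-1])          # match the line back-to-front
info = {k: v[::-1] for k, v in m.groupdict().items()}  # un-reverse each captured group
print(info)  # {'codename': 'Thirty Two', 'version': '32', 'name': 'Fedora'}
```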
1,157 | def load_sharded_checkpoint(model, folder, strict=True):
# Load the index
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
if not os.path.isfile(index_file):
raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.")
with open(index_file, "r", encoding="utf-8") as f:
index = json.load(f)
shard_files = list(set(index["weight_map"].values()))
# If strict=True, error before loading any of the state dicts.
loaded_keys = index["weight_map"].keys()
model_keys = model.state_dict().keys()
missing_keys = [key for key in model_keys if key not in loaded_keys]
unexpected_keys = [key for key in loaded_keys if key not in model_keys]
if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
if len(missing_keys) > 0:
str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
error_message += f"\nMissing key(s): {str_missing_keys}."
if len(unexpected_keys) > 0:
str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
error_message += f"\nMissing key(s): {str_unexpected_keys}."
raise RuntimeError(error_message)
for shard_file in shard_files:
state_dict = torch.load(os.path.join(folder, shard_file))
model.load_state_dict(state_dict, strict=False)
# Make sure memory is freed before we load the next state dict.
del state_dict
gc.collect()
# Return the same thing as PyTorch load_state_dict function.
return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
|
This is the same as
[`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)
but for a sharded checkpoint.
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
loaded in the model.
Args:
model (`torch.nn.Module`): The model in which to load the checkpoint.
folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint.
strict (`bool`, *optional*, defaults to `True`):
Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
Returns:
`NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields
- `missing_keys` is a list of str containing the missing keys
- `unexpected_keys` is a list of str containing the unexpected keys
| 115 | 167 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def load_sharded_checkpoint(model, folder, strict=True):
# Load the index
index_file = os.path.join(folder, WEIGHTS_INDEX_NAME)
if not os.path.isfile(index_file):
raise ValueError(f"Can't find a checkpoint index ({WEIGHTS_INDEX_NAME}) in {folder}.")
with open(index_file, "r", encoding="utf-8") as f:
index = json.load(f)
shard_files = list(set(index["weight_map"].values()))
# If strict=True, error before loading any of the state dicts.
loaded_keys = index["weight_map"].keys()
model_keys = model.state_dict().keys()
missing_keys = [key for key in model_keys if key not in loaded_keys]
unexpected_keys = [key for key in loaded_keys if key not in model_keys]
if strict and (len(missing_keys) > 0 or len(unexpected_keys) > 0):
error_message = f"Error(s) in loading state_dict for {model.__class__.__name__}"
if len(missing_keys) > 0:
str_missing_keys = ",".join([f'"{k}"' for k in missing_keys])
error_message += f"\nMissing key(s): {str_missing_keys}."
if len(unexpected_keys) > 0:
str_unexpected_keys = ",".join([f'"{k}"' for k in unexpected_keys])
error_message += f"\nMissing key(s): {str_unexpected_keys}."
raise RuntimeError(error_message)
for shard_file in shard_files:
state_dict = torch.load(os.path.join(folder, shard_file))
model.load_state_dict(state_dict, strict=False)
# Make sure memory is freed before we load the next state dict.
del state_dict
gc.collect()
# Return the same thing as PyTorch load_state_dict function.
return torch.nn.modules.module._IncompatibleKeys(missing_keys, unexpected_keys)
```
###Assistant :
This is the same as
[`torch.nn.Module.load_state_dict`](https://pytorch.org/docs/stable/generated/torch.nn.Module.html?highlight=load_state_dict#torch.nn.Module.load_state_dict)
but for a sharded checkpoint.
This load is performed efficiently: each checkpoint shard is loaded one by one in RAM and deleted after being
loaded in the model.
Args:
model (`torch.nn.Module`): The model in which to load the checkpoint.
folder (`str` or `os.PathLike`): A path to a folder containing the sharded checkpoint.
strict (`bool`, *optional*, defaults to `True`):
Whether to strictly enforce that the keys in the model state dict match the keys in the sharded checkpoint.
Returns:
`NamedTuple`: A named tuple with `missing_keys` and `unexpected_keys` fields
- `missing_keys` is a list of str containing the missing keys
- `unexpected_keys` is a list of str containing the unexpected keys
|
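A short usage sketch, assuming a sharded checkpoint produced by `save_pretrained(..., max_shard_size=...)` lives in a hypothetical `./checkpoint-shards` folder; the model name is a placeholder.

```Python
from transformers import AutoModel
from transformers.modeling_utils import load_sharded_checkpoint

# Placeholder model name and folder for this sketch.
model = AutoModel.from_pretrained("bert-base-uncased")
result = load_sharded_checkpoint(model, "./checkpoint-shards", strict=False)
print(result.missing_keys, result.unexpected_keys)
```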
1,158 | def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
fields = get_fields("Customer", fields)
searchfields = frappe.get_meta("Customer").get_search_fields()
searchfields = " or ".join(field + " like %(txt)s" for field in searchfields)
return frappe.db.sql(
.format(
**{
"fields": ", ".join(fields),
"scond": searchfields,
"mcond": get_match_cond(doctype),
"fcond": get_filters_cond(doctype, filters, conditions).replace("%", "%%"),
}
),
{"txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len},
)
# searches for supplier
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs | select {fields} from `tabCustomer`
where docstatus < 2
and ({scond}) and disabled=0
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
idx desc,
name, customer_name
limit %(start)s, %(page_len)s | 33 | 86 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def customer_query(doctype, txt, searchfield, start, page_len, filters):
conditions = []
cust_master_name = frappe.defaults.get_user_default("cust_master_name")
if cust_master_name == "Customer Name":
fields = ["name", "customer_group", "territory"]
else:
fields = ["name", "customer_name", "customer_group", "territory"]
fields = get_fields("Customer", fields)
searchfields = frappe.get_meta("Customer").get_search_fields()
searchfields = " or ".join(field + " like %(txt)s" for field in searchfields)
return frappe.db.sql(
.format(
**{
"fields": ", ".join(fields),
"scond": searchfields,
"mcond": get_match_cond(doctype),
"fcond": get_filters_cond(doctype, filters, conditions).replace("%", "%%"),
}
),
{"txt": "%%%s%%" % txt, "_txt": txt.replace("%", ""), "start": start, "page_len": page_len},
)
# searches for supplier
@frappe.whitelist()
@frappe.validate_and_sanitize_search_inputs
```
###Assistant : select {fields} from `tabCustomer`
where docstatus < 2
and ({scond}) and disabled=0
{fcond} {mcond}
order by
if(locate(%(_txt)s, name), locate(%(_txt)s, name), 99999),
if(locate(%(_txt)s, customer_name), locate(%(_txt)s, customer_name), 99999),
idx desc,
name, customer_name
limit %(start)s, %(page_len)s
|
1,159 | def get_group_tag_value_count(self, group, environment_id, key):
raise NotImplementedError
|
>>> get_group_tag_value_count(group, 3, 'key1')
| 4 | 7 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_group_tag_value_count(self, group, environment_id, key):
raise NotImplementedError
```
###Assistant :
>>> get_group_tag_value_count(group, 3, 'key1')
|
1,160 | def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
session = settings.Session()
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):
task1 = EmptyOperator(task_id=task_id_1)
self.scheduler_job = SchedulerJob(subdir=os.devnull)
dr1 = dag_maker.create_dagrun(state=state)
ti = dr1.get_task_instance(task1.task_id, session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
self.scheduler_job._enqueue_task_instances_with_queued_state([ti])
ti.refresh_from_db()
assert ti.state == State.NONE
mock_queue_command.assert_not_called()
| This tests that task instances whose dagrun is in finished state are not queued | 14 | 47 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_enqueue_task_instances_sets_ti_state_to_None_if_dagrun_in_finish_state(self, state, dag_maker):
dag_id = 'SchedulerJobTest.test_enqueue_task_instances_with_queued_state'
task_id_1 = 'dummy'
session = settings.Session()
with dag_maker(dag_id=dag_id, start_date=DEFAULT_DATE, session=session):
task1 = EmptyOperator(task_id=task_id_1)
self.scheduler_job = SchedulerJob(subdir=os.devnull)
dr1 = dag_maker.create_dagrun(state=state)
ti = dr1.get_task_instance(task1.task_id, session)
ti.state = State.SCHEDULED
session.merge(ti)
session.commit()
with patch.object(BaseExecutor, 'queue_command') as mock_queue_command:
self.scheduler_job._enqueue_task_instances_with_queued_state([ti])
ti.refresh_from_db()
assert ti.state == State.NONE
mock_queue_command.assert_not_called()
```
###Assistant : This tests that task instances whose dagrun is in finished state are not queued
|
1,161 | def match_hostname(cert, hostname):
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover | Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
| 40 | 155 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def match_hostname(cert, hostname):
if not cert:
raise ValueError("empty or no certificate, match_hostname needs a "
"SSL socket or SSL context with either "
"CERT_OPTIONAL or CERT_REQUIRED")
dnsnames = []
san = cert.get('subjectAltName', ())
for key, value in san:
if key == 'DNS':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if not dnsnames:
# The subject is only checked when there is no dNSName entry
# in subjectAltName
for sub in cert.get('subject', ()):
for key, value in sub:
# XXX according to RFC 2818, the most specific Common Name
# must be used.
if key == 'commonName':
if _dnsname_match(value, hostname):
return
dnsnames.append(value)
if len(dnsnames) > 1:
raise CertificateError("hostname %r "
"doesn't match either of %s"
% (hostname, ', '.join(map(repr, dnsnames))))
elif len(dnsnames) == 1:
raise CertificateError("hostname %r "
"doesn't match %r"
% (hostname, dnsnames[0]))
else:
raise CertificateError("no appropriate commonName or "
"subjectAltName fields were found")
try:
from types import SimpleNamespace as Container
except ImportError: # pragma: no cover
```
###Assistant : Verify that *cert* (in decoded format as returned by
SSLSocket.getpeercert()) matches the *hostname*. RFC 2818 and RFC 6125
rules are followed, but IP addresses are not accepted for *hostname*.
CertificateError is raised on failure. On success, the function
returns nothing.
|
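A usage sketch with a hand-built certificate dict shaped like the output of `SSLSocket.getpeercert()`; the hostnames are made up, and the helpers `_dnsname_match` and `CertificateError` from the same module are assumed to be available.

```Python
cert = {
    "subject": ((("commonName", "example.org"),),),
    "subjectAltName": (("DNS", "example.org"), ("DNS", "*.example.org")),
}

match_hostname(cert, "www.example.org")   # returns None: matches the wildcard SAN
try:
    match_hostname(cert, "evil.test")
except CertificateError as err:
    print(err)                            # hostname 'evil.test' doesn't match ...
```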
1,162 | def take(self, n) -> "IterableDataset":
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
token_per_repo_id=self._token_per_repo_id,
)
|
Create a new IterableDataset with only the first ``n`` elements.
Args:
n (:obj:`int`): number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
| 117 | 18 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def take(self, n) -> "IterableDataset":
ex_iterable = TakeExamplesIterable(self._ex_iterable, n)
return iterable_dataset(
ex_iterable=ex_iterable,
info=self._info.copy(),
split=self._split,
format_type=self._format_type,
shuffling=copy.deepcopy(self._shuffling),
token_per_repo_id=self._token_per_repo_id,
)
```
###Assistant :
Create a new IterableDataset with only the first ``n`` elements.
Args:
n (:obj:`int`): number of elements to take.
Example:
```py
>>> from datasets import load_dataset
>>> ds = load_dataset("rotten_tomatoes", split="train", streaming=True)
>>> small_ds = ds.take(2)
>>> list(small_ds)
[{'label': 1,
'text': 'the rock is destined to be the 21st century\'s new " conan " and that he\'s going to make a splash even greater than arnold schwarzenegger , jean-claud van damme or steven segal .'},
{'label': 1,
'text': 'the gorgeously elaborate continuation of " the lord of the rings " trilogy is so huge that a column of words cannot adequately describe co-writer/director peter jackson\'s expanded vision of j . r . r . tolkien\'s middle-earth .'}]
```
|
1,163 | def test_category_match_group(self):
from sentry.grouping.enhancer import Enhancements
enhancement = Enhancements.from_config_string(
,
)
event = make_event(
platform="native",
exception={
"values": [
{
"type": "Hello",
"stacktrace": {
"frames": [
{
"function": "foo",
},
{
"function": "bar",
},
]
},
}
]
},
)
manager = EventManager(event)
manager.normalize()
grouping_config = {
"enhancements": enhancement.dumps(),
"id": "mobile:2021-02-12",
}
manager.get_data()["grouping_config"] = grouping_config
event1 = manager.save(self.project.id)
event2 = Event(event1.project_id, event1.event_id, data=event1.data)
assert event1.get_hashes().hashes == event2.get_hashes(grouping_config).hashes
|
Regression test to ensure categories are applied consistently and don't
produce hash mismatches.
function:foo category=foo_like
category:foo_like -group
| 17 | 66 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_category_match_group(self):
from sentry.grouping.enhancer import Enhancements
enhancement = Enhancements.from_config_string(
,
)
event = make_event(
platform="native",
exception={
"values": [
{
"type": "Hello",
"stacktrace": {
"frames": [
{
"function": "foo",
},
{
"function": "bar",
},
]
},
}
]
},
)
manager = EventManager(event)
manager.normalize()
grouping_config = {
"enhancements": enhancement.dumps(),
"id": "mobile:2021-02-12",
}
manager.get_data()["grouping_config"] = grouping_config
event1 = manager.save(self.project.id)
event2 = Event(event1.project_id, event1.event_id, data=event1.data)
assert event1.get_hashes().hashes == event2.get_hashes(grouping_config).hashes
```
###Assistant :
Regression test to ensure categories are applied consistently and don't
produce hash mismatches.
function:foo category=foo_like
category:foo_like -group
|
1,164 | def notify(self, notification, raise_exception=False):
event = notification.event
try:
return self.notify_users(
event.group, event, triggering_rules=[r.label for r in notification.rules]
)
except (
ApiError,
HTTPError,
InvalidIdentity,
PluginError,
SSLError,
UrllibHTTPError,
) as err:
self.logger.info(
"notification-plugin.notify-failed",
extra={
"error": str(err),
"plugin": self.slug,
"project_id": event.group.project_id,
"organization_id": event.group.project.organization_id,
},
)
if raise_exception:
raise err
return False
|
This calls the notify_users method of the plugin.
Normally this method eats the error and logs it but if we
set raise_exception=True like we do for the test plugin button,
the exception is raised
| 34 | 48 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def notify(self, notification, raise_exception=False):
event = notification.event
try:
return self.notify_users(
event.group, event, triggering_rules=[r.label for r in notification.rules]
)
except (
ApiError,
HTTPError,
InvalidIdentity,
PluginError,
SSLError,
UrllibHTTPError,
) as err:
self.logger.info(
"notification-plugin.notify-failed",
extra={
"error": str(err),
"plugin": self.slug,
"project_id": event.group.project_id,
"organization_id": event.group.project.organization_id,
},
)
if raise_exception:
raise err
return False
```
###Assistant :
This calls the notify_users method of the plugin.
Normally this method eats the error and logs it but if we
set raise_exception=True like we do for the test plugin button,
the exception is raised
|
1,165 | def test_processors(self):
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.plugin_pool import plugin_pool
instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
load_from_string = self.load_template_from_string
|
Tests that plugin processors and plugin context processors can be defined
in settings and are working and that extra plugin context processors can be
passed to PluginContext.
| 27 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_processors(self):
from djangocms_text_ckeditor.cms_plugins import TextPlugin
from cms.plugin_pool import plugin_pool
instance = CMSPlugin.objects.all()[0].get_plugin_instance()[0]
load_from_string = self.load_template_from_string
```
###Assistant :
Tests that plugin processors and plugin context processors can be defined
in settings and are working and that extra plugin context processors can be
passed to PluginContext.
|
1,166 | async def test_unique_id_ignore(hass, manager):
async_setup_entry = AsyncMock(return_value=False)
mock_integration(hass, MockModule("comp", async_setup_entry=async_setup_entry))
mock_entity_platform(hass, "config_flow.comp", None)
| Test that we can ignore flows that are in progress and have a unique ID. | 15 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_unique_id_ignore(hass, manager):
async_setup_entry = AsyncMock(return_value=False)
mock_integration(hass, MockModule("comp", async_setup_entry=async_setup_entry))
mock_entity_platform(hass, "config_flow.comp", None)
```
###Assistant : Test that we can ignore flows that are in progress and have a unique ID.
|
1,167 | def bind(self, bind_string, key, propagate=True):
if not self._is_window_created('tried Window.bind'):
return
self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))
self.user_bind_dict[bind_string] = key
|
Used to add tkinter events to a Window.
The tkinter specific data is in the Window's member variable user_bind_event
:param bind_string: The string tkinter expected in its bind function
:type bind_string: (str)
:param key: The event that will be generated when the tkinter event occurs
:type key: str | int | tuple | object
:param propagate: If True then tkinter will be told to propagate the event
:type propagate: (bool)
| 70 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def bind(self, bind_string, key, propagate=True):
if not self._is_window_created('tried Window.bind'):
return
self.TKroot.bind(bind_string, lambda evt: self._user_bind_callback(bind_string, evt, propagate))
self.user_bind_dict[bind_string] = key
```
###Assistant :
Used to add tkinter events to a Window.
The tkinter specific data is in the Window's member variable user_bind_event
:param bind_string: The string tkinter expected in its bind function
:type bind_string: (str)
:param key: The event that will be generated when the tkinter event occurs
:type key: str | int | tuple | object
:param propagate: If True then tkinter will be told to propagate the event
:type propagate: (bool)
|
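A small PySimpleGUI-style usage sketch; the window layout and the event key `-SAVE-` are arbitrary choices for this example.

```Python
import PySimpleGUI as sg

window = sg.Window("Demo", [[sg.Input(key="-IN-")], [sg.Button("Ok")]], finalize=True)
# Fire the "-SAVE-" event whenever Ctrl+S is pressed anywhere in the window.
window.bind("<Control-s>", "-SAVE-")

while True:
    event, values = window.read()
    if event in (sg.WIN_CLOSED, "Ok"):
        break
    if event == "-SAVE-":
        print("save requested:", values["-IN-"])
window.close()
```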
1,168 | def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
targets = []
if self.training:
rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
targets_list = [targets]
self.assigned_rois = (rois, rois_num)
self.assigned_targets = targets
pred_bbox = None
head_out_list = []
for i in range(self.num_cascade_stages):
if i > 0:
rois, rois_num = self._get_rois_from_boxes(pred_bbox,
inputs['im_shape'])
if self.training:
rois, rois_num, targets = self.bbox_assigner(
rois, rois_num, inputs, i, is_cascade=True)
targets_list.append(targets)
rois_feat = self.roi_extractor(body_feats, rois, rois_num)
bbox_feat = self.head(rois_feat, i)
scores = self.bbox_score_list[i](bbox_feat)
deltas = self.bbox_delta_list[i](bbox_feat)
# TODO (lyuwenyu) Is it correct for only one class ?
if not self.reg_class_agnostic and i < self.num_cascade_stages - 1:
deltas = deltas.reshape([-1, self.num_classes, 4])
labels = scores[:, :-1].argmax(axis=-1)
deltas = deltas[paddle.arange(deltas.shape[0]), labels]
head_out_list.append([scores, deltas, rois])
pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i])
if self.training:
loss = {}
for stage, value in enumerate(zip(head_out_list, targets_list)):
(scores, deltas, rois), targets = value
loss_stage = self.get_loss(scores, deltas, targets, rois,
self.bbox_weight[stage])
for k, v in loss_stage.items():
loss[k + "_stage{}".format(
stage)] = v / self.num_cascade_stages
return loss, bbox_feat
else:
scores, deltas, self.refined_rois = self.get_prediction(
head_out_list)
return (deltas, scores), self.head
|
body_feats (list[Tensor]): Feature maps from backbone
rois (Tensor): RoIs generated from RPN module
rois_num (Tensor): The number of RoIs in each image
inputs (dict{Tensor}): The ground-truth of image
| 28 | 167 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def forward(self, body_feats=None, rois=None, rois_num=None, inputs=None):
targets = []
if self.training:
rois, rois_num, targets = self.bbox_assigner(rois, rois_num, inputs)
targets_list = [targets]
self.assigned_rois = (rois, rois_num)
self.assigned_targets = targets
pred_bbox = None
head_out_list = []
for i in range(self.num_cascade_stages):
if i > 0:
rois, rois_num = self._get_rois_from_boxes(pred_bbox,
inputs['im_shape'])
if self.training:
rois, rois_num, targets = self.bbox_assigner(
rois, rois_num, inputs, i, is_cascade=True)
targets_list.append(targets)
rois_feat = self.roi_extractor(body_feats, rois, rois_num)
bbox_feat = self.head(rois_feat, i)
scores = self.bbox_score_list[i](bbox_feat)
deltas = self.bbox_delta_list[i](bbox_feat)
# TODO (lyuwenyu) Is it correct for only one class ?
if not self.reg_class_agnostic and i < self.num_cascade_stages - 1:
deltas = deltas.reshape([-1, self.num_classes, 4])
labels = scores[:, :-1].argmax(axis=-1)
deltas = deltas[paddle.arange(deltas.shape[0]), labels]
head_out_list.append([scores, deltas, rois])
pred_bbox = self._get_pred_bbox(deltas, rois, self.bbox_weight[i])
if self.training:
loss = {}
for stage, value in enumerate(zip(head_out_list, targets_list)):
(scores, deltas, rois), targets = value
loss_stage = self.get_loss(scores, deltas, targets, rois,
self.bbox_weight[stage])
for k, v in loss_stage.items():
loss[k + "_stage{}".format(
stage)] = v / self.num_cascade_stages
return loss, bbox_feat
else:
scores, deltas, self.refined_rois = self.get_prediction(
head_out_list)
return (deltas, scores), self.head
```
###Assistant :
body_feats (list[Tensor]): Feature maps from backbone
rois (Tensor): RoIs generated from RPN module
rois_num (Tensor): The number of RoIs in each image
inputs (dict{Tensor}): The ground-truth of image
|
1,169 | def get_fws(value):
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
| FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
| 39 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_fws(value):
newvalue = value.lstrip()
fws = WhiteSpaceTerminal(value[:len(value)-len(newvalue)], 'fws')
return fws, newvalue
```
###Assistant : FWS = 1*WSP
This isn't the RFC definition. We're using fws to represent tokens where
folding can be done, but when we are parsing the *un*folding has already
been done so we don't need to watch out for CRLF.
|
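The same helper lives in the stdlib's private `email._header_value_parser` module, so a quick check looks like this (the header text is arbitrary).

```Python
from email._header_value_parser import get_fws

fws, rest = get_fws("   To: alice@example.com")
print(repr(str(fws)), repr(rest))   # '   ' 'To: alice@example.com'
```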
1,170 | def load_data_snli(batch_size, num_steps=50):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, num_steps)
test_set = SNLIDataset(test_data, num_steps, train_set.vocab)
train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True,
num_workers=num_workers)
test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False,
num_workers=num_workers)
return train_iter, test_iter, train_set.vocab
| Download the SNLI dataset and return data iterators and vocabulary.
Defined in :numref:`sec_natural-language-inference-and-dataset` | 13 | 42 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def load_data_snli(batch_size, num_steps=50):
num_workers = d2l.get_dataloader_workers()
data_dir = d2l.download_extract('SNLI')
train_data = read_snli(data_dir, True)
test_data = read_snli(data_dir, False)
train_set = SNLIDataset(train_data, num_steps)
test_set = SNLIDataset(test_data, num_steps, train_set.vocab)
train_iter = gluon.data.DataLoader(train_set, batch_size, shuffle=True,
num_workers=num_workers)
test_iter = gluon.data.DataLoader(test_set, batch_size, shuffle=False,
num_workers=num_workers)
return train_iter, test_iter, train_set.vocab
```
###Assistant : Download the SNLI dataset and return data iterators and vocabulary.
Defined in :numref:`sec_natural-language-inference-and-dataset`
|
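A quick usage sketch; it assumes the `d2l` package (MXNet flavour) is installed and will download SNLI on first use.

```Python
train_iter, test_iter, vocab = load_data_snli(128, 50)
print(len(vocab))
for X, Y in train_iter:
    # X is a (premises, hypotheses) pair of (128, 50) minibatches, Y the (128,) labels.
    print(X[0].shape, X[1].shape, Y.shape)
    break
```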
1,171 | def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:
try:
import plotly.graph_objects as go
from plotly.subplots import make_subplots
except ImportError:
logger.exception("Module plotly not found \n Please install using `pip3 install plotly`")
exit(1)
from freqtrade.plot.plotting import store_plot_file
# Gather feature importance from model
if "catboost.core" in str(model.__class__):
feature_importance = model.get_feature_importance()
elif "lightgbm.sklearn" in str(model.__class__):
feature_importance = model.feature_importances_
else:
raise NotImplementedError(f"Cannot extract feature importance for {model.__class__}")
# Data preparation
fi_df = pd.DataFrame({
"feature_names": np.array(feature_names),
"feature_importance": np.array(feature_importance)
})
fi_df_top = fi_df.nlargest(count_max, "feature_importance")[::-1]
fi_df_worst = fi_df.nsmallest(count_max, "feature_importance")[::-1]
# Plotting |
Plot Best and Worst Features by importance for CatBoost model.
Called once per sub-train.
Usage: plot_feature_importance(
model=model,
feature_names=dk.training_features_list,
pair=pair,
train_dir=dk.data_path)
| 20 | 84 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def plot_feature_importance(model, feature_names, pair, train_dir, count_max=50) -> None:
try:
import plotly.graph_objects as go
from plotly.subplots import make_subplots
except ImportError:
logger.exception("Module plotly not found \n Please install using `pip3 install plotly`")
exit(1)
from freqtrade.plot.plotting import store_plot_file
# Gather feature importance from model
if "catboost.core" in str(model.__class__):
feature_importance = model.get_feature_importance()
elif "lightgbm.sklearn" in str(model.__class__):
feature_importance = model.feature_importances_
else:
raise NotImplementedError(f"Cannot extract feature importance for {model.__class__}")
# Data preparation
fi_df = pd.DataFrame({
"feature_names": np.array(feature_names),
"feature_importance": np.array(feature_importance)
})
fi_df_top = fi_df.nlargest(count_max, "feature_importance")[::-1]
fi_df_worst = fi_df.nsmallest(count_max, "feature_importance")[::-1]
# Plotting
```
###Assistant :
Plot Best and Worst Features by importance for CatBoost model.
Called once per sub-train.
Usage: plot_feature_importance(
model=model,
feature_names=dk.training_features_list,
pair=pair,
train_dir=dk.data_path)
|
1,172 | def get_primary_key_column(self, cursor, table_name):
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
for _, name, *_, pk in cursor.fetchall():
if pk:
return name
return None
| Return the column name of the primary key for the given table. | 12 | 23 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_primary_key_column(self, cursor, table_name):
cursor.execute(
"PRAGMA table_info(%s)" % self.connection.ops.quote_name(table_name)
)
for _, name, *_, pk in cursor.fetchall():
if pk:
return name
return None
```
###Assistant : Return the column name of the primary key for the given table.
|
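The same `PRAGMA table_info` lookup can be tried directly against an in-memory SQLite database; the table name here is arbitrary.

```Python
import sqlite3

con = sqlite3.connect(":memory:")
con.execute("CREATE TABLE author (id INTEGER PRIMARY KEY, name TEXT)")

cursor = con.execute("PRAGMA table_info(author)")
# Each row is (cid, name, type, notnull, dflt_value, pk); pk is non-zero for the primary key.
for _, name, *_, pk in cursor.fetchall():
    if pk:
        print(name)  # -> id
```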
1,173 | def validate_settings():
try:
django_backend = [x for x in settings.TEMPLATES
if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]
except IndexError:
raise ImproperlyConfigured(
"django CMS requires django.template.context_processors.request in "
"'django.template.backends.django.DjangoTemplates' context processors."
)
context_processors = django_backend.get('OPTIONS', {}).get('context_processors', [])
if ('django.core.context_processors.request' not in context_processors and # noqa: W504
'django.template.context_processors.request' not in context_processors):
raise ImproperlyConfigured("django CMS requires django.template.context_processors.request in "
"'django.template.backends.django.DjangoTemplates' context processors.")
if (
hasattr(settings, "SEND_BROKEN_LINK_EMAILS") and # noqa: W504
"django.middleware.common.BrokenLinkEmailsMiddleware" not in getattr(settings, "MIDDLEWARE", [])
):
warnings.warn('The setting "SEND_BROKEN_LINK_EMAILS" will not be honored by django CMS as of version 4.1. '
'Add "django.middleware.common.BrokenLinkEmailsMiddleware" to your MIDDLEWARE settings '
'instead.', DeprecationWarning)
|
Check project settings file for required options
| 7 | 95 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def validate_settings():
try:
django_backend = [x for x in settings.TEMPLATES
if x['BACKEND'] == 'django.template.backends.django.DjangoTemplates'][0]
except IndexError:
raise ImproperlyConfigured(
"django CMS requires django.template.context_processors.request in "
"'django.template.backends.django.DjangoTemplates' context processors."
)
context_processors = django_backend.get('OPTIONS', {}).get('context_processors', [])
if ('django.core.context_processors.request' not in context_processors and # noqa: W504
'django.template.context_processors.request' not in context_processors):
raise ImproperlyConfigured("django CMS requires django.template.context_processors.request in "
"'django.template.backends.django.DjangoTemplates' context processors.")
if (
hasattr(settings, "SEND_BROKEN_LINK_EMAILS") and # noqa: W504
"django.middleware.common.BrokenLinkEmailsMiddleware" not in getattr(settings, "MIDDLEWARE", [])
):
warnings.warn('The setting "SEND_BROKEN_LINK_EMAILS" will not be honored by django CMS as of version 4.1. '
'Add "django.middleware.common.BrokenLinkEmailsMiddleware" to your MIDDLEWARE settings '
'instead.', DeprecationWarning)
```
###Assistant :
Check project settings file for required options
|
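For reference, a settings fragment that passes the check above might look like this; the exact list of other context processors is illustrative.

```Python
# settings.py (illustrative)
TEMPLATES = [
    {
        "BACKEND": "django.template.backends.django.DjangoTemplates",
        "DIRS": [],
        "APP_DIRS": True,
        "OPTIONS": {
            "context_processors": [
                "django.template.context_processors.request",  # required by django CMS
                "django.contrib.auth.context_processors.auth",
                "django.contrib.messages.context_processors.messages",
            ],
        },
    },
]
```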
1,174 | def get(cls):
min_partition_size = super().get()
assert min_partition_size > 0, "`min_partition_size` should be > 0"
return min_partition_size
|
Get ``MinPartitionSize`` with extra checks.
Returns
-------
int
| 8 | 16 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get(cls):
min_partition_size = super().get()
assert min_partition_size > 0, "`min_partition_size` should be > 0"
return min_partition_size
```
###Assistant :
Get ``MinPartitionSize`` with extra checks.
Returns
-------
int
|
1,175 | def pbt_function(config):
lr = config["lr"]
accuracy = 0.0 # end = 1000
start = 0
if session.get_checkpoint():
state = session.get_checkpoint().to_dict()
accuracy = state["acc"]
start = state["step"]
midpoint = 100 # lr starts decreasing after acc > midpoint
q_tolerance = 3 # penalize exceeding lr by more than this multiple
noise_level = 2 # add gaussian noise to the acc increase
# triangle wave:
# - start at 0.001 @ t=0,
# - peak at 0.01 @ t=midpoint,
# - end at 0.001 @ t=midpoint * 2,
for step in range(start, 100):
if accuracy < midpoint:
optimal_lr = 0.01 * accuracy / midpoint
else:
optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint
optimal_lr = min(0.01, max(0.001, optimal_lr))
# compute accuracy increase
q_err = max(lr, optimal_lr) / min(lr, optimal_lr)
if q_err < q_tolerance:
accuracy += (1.0 / q_err) * random.random()
elif lr > optimal_lr:
accuracy -= (q_err - q_tolerance) * random.random()
accuracy += noise_level * np.random.normal()
accuracy = max(0, accuracy)
checkpoint = None
if step % 3 == 0:
checkpoint = Checkpoint.from_dict({"acc": accuracy, "step": start})
session.report(
{
"mean_accuracy": accuracy,
"cur_lr": lr,
"optimal_lr": optimal_lr, # for debugging
"q_err": q_err, # for debugging
"done": accuracy > midpoint * 2, # this stops the training process
},
checkpoint=checkpoint,
)
| Toy PBT problem for benchmarking adaptive learning rate.
The goal is to optimize this trainable's accuracy. The accuracy increases
fastest at the optimal lr, which is a function of the current accuracy.
The optimal lr schedule for this problem is the triangle wave as follows.
Note that many lr schedules for real models also follow this shape:
best lr
  ^
  |    /\
  |   /  \
  |  /    \
  | /      \
  ------------> accuracy
In this problem, using PBT with a population of 2-4 is sufficient to
roughly approximate this lr schedule. Higher population sizes will yield
faster convergence. Training will not converge without PBT.
| 104 | 207 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def pbt_function(config):
lr = config["lr"]
accuracy = 0.0 # end = 1000
start = 0
if session.get_checkpoint():
state = session.get_checkpoint().to_dict()
accuracy = state["acc"]
start = state["step"]
midpoint = 100 # lr starts decreasing after acc > midpoint
q_tolerance = 3 # penalize exceeding lr by more than this multiple
noise_level = 2 # add gaussian noise to the acc increase
# triangle wave:
# - start at 0.001 @ t=0,
# - peak at 0.01 @ t=midpoint,
# - end at 0.001 @ t=midpoint * 2,
for step in range(start, 100):
if accuracy < midpoint:
optimal_lr = 0.01 * accuracy / midpoint
else:
optimal_lr = 0.01 - 0.01 * (accuracy - midpoint) / midpoint
optimal_lr = min(0.01, max(0.001, optimal_lr))
# compute accuracy increase
q_err = max(lr, optimal_lr) / min(lr, optimal_lr)
if q_err < q_tolerance:
accuracy += (1.0 / q_err) * random.random()
elif lr > optimal_lr:
accuracy -= (q_err - q_tolerance) * random.random()
accuracy += noise_level * np.random.normal()
accuracy = max(0, accuracy)
checkpoint = None
if step % 3 == 0:
checkpoint = Checkpoint.from_dict({"acc": accuracy, "step": start})
session.report(
{
"mean_accuracy": accuracy,
"cur_lr": lr,
"optimal_lr": optimal_lr, # for debugging
"q_err": q_err, # for debugging
"done": accuracy > midpoint * 2, # this stops the training process
},
checkpoint=checkpoint,
)
```
###Assistant : Toy PBT problem for benchmarking adaptive learning rate.
The goal is to optimize this trainable's accuracy. The accuracy increases
fastest at the optimal lr, which is a function of the current accuracy.
The optimal lr schedule for this problem is the triangle wave as follows.
Note that many lr schedules for real models also follow this shape:
best lr
  ^
  |    /\
  |   /  \
  |  /    \
  | /      \
  ------------> accuracy
In this problem, using PBT with a population of 2-4 is sufficient to
roughly approximate this lr schedule. Higher population sizes will yield
faster convergence. Training will not converge without PBT.
|
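A rough sketch of driving this trainable with a PBT scheduler via the classic `tune.run` API; the population size, perturbation interval, and mutation range are arbitrary choices for this sketch, and `pbt_function` is the trainable defined above.

```Python
import random
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining

pbt = PopulationBasedTraining(
    time_attr="training_iteration",
    perturbation_interval=4,
    hyperparam_mutations={"lr": lambda: random.uniform(0.0001, 0.02)},
)

analysis = tune.run(
    pbt_function,
    scheduler=pbt,
    metric="mean_accuracy",
    mode="max",
    num_samples=4,          # population of 4 trials
    config={"lr": 0.0001},  # starting lr; PBT perturbs it over time
)
```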
1,176 | def add_hedge_option(price, implied_volatility, strike, days, side):
# Determine delta position given the option
delta = calc_delta(price, implied_volatility, strike, days, 0, side)
# Determine gamma position given the option
gamma = calc_gamma(price, implied_volatility, strike, days, 0)
# Determine vega position given the option
vega = calc_vega(price, implied_volatility, strike, days, 0)
return delta, gamma, vega
| Determine the delta, gamma and vega value of the portfolio and/or options.
Parameters
----------
price: int
The price.
implied_volatility: float
The implied volatility.
strike: float
The strike price.
days: float
The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
side: int
Whether you have a long (1) or short (-1) position
Returns
-------
delta: float
gamma: float
vega: float
| 67 | 53 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def add_hedge_option(price, implied_volatility, strike, days, side):
# Determine delta position given the option
delta = calc_delta(price, implied_volatility, strike, days, 0, side)
# Determine gamma position given the option
gamma = calc_gamma(price, implied_volatility, strike, days, 0)
# Determine vega position given the option
vega = calc_vega(price, implied_volatility, strike, days, 0)
return delta, gamma, vega
```
###Assistant : Determine the delta, gamma and vega value of the portfolio and/or options.
Parameters
----------
price: int
The price.
implied_volatility: float
The implied volatility.
strike: float
The strike price.
days: float
The amount of days until expiration. Use annual notation thus a month would be 30 / 360.
side: int
Whether you have a long (1) or short (-1) position
Returns
-------
delta: float
gamma: float
vega: float
|
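A tiny usage sketch with made-up option parameters; it assumes the `calc_delta`, `calc_gamma`, and `calc_vega` helpers from the same module are importable.

```Python
# Spot 100, 20% implied volatility, strike 95, 30 days to expiry, long position (+1).
delta, gamma, vega = add_hedge_option(100, 0.20, 95, 30 / 360, 1)
print(f"delta={delta:.3f}  gamma={gamma:.4f}  vega={vega:.3f}")
```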
1,177 | def header_encode(self, string):
codec = self.output_codec or 'us-ascii'
header_bytes = _encode(string, codec)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
encoder_module = self._get_encoder(header_bytes)
if encoder_module is None:
return string
return encoder_module.header_encode(header_bytes, codec)
| Header-encode a string by converting it first to bytes.
The type of encoding (base64 or quoted-printable) will be based on
this charset's `header_encoding`.
:param string: A unicode string for the header. It must be possible
to encode this string to bytes using the character set's
output codec.
:return: The encoded string, with RFC 2047 chrome.
| 55 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def header_encode(self, string):
codec = self.output_codec or 'us-ascii'
header_bytes = _encode(string, codec)
# 7bit/8bit encodings return the string unchanged (modulo conversions)
encoder_module = self._get_encoder(header_bytes)
if encoder_module is None:
return string
return encoder_module.header_encode(header_bytes, codec)
```
###Assistant : Header-encode a string by converting it first to bytes.
The type of encoding (base64 or quoted-printable) will be based on
this charset's `header_encoding`.
:param string: A unicode string for the header. It must be possible
to encode this string to bytes using the character set's
output codec.
:return: The encoded string, with RFC 2047 chrome.
|
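The public behaviour is easiest to see through `email.charset.Charset`, which routes header encoding through this method; the sample strings are arbitrary.

```Python
from email.charset import Charset

utf8 = Charset("utf-8")          # header_encoding defaults to base64 for utf-8
print(utf8.header_encode("Héllo, wörld"))
# -> =?utf-8?b?SMOpbGxvLCB3w7ZybGQ=?=

ascii_cs = Charset("us-ascii")   # 7bit charsets are returned unchanged
print(ascii_cs.header_encode("plain text"))
# -> plain text
```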
1,178 | def data_dict(self, records):
self.version = records[0].replace("File-Date:", "").strip()
dic = {}
dic["deprecated"] = {}
for label in [
"language",
"extlang",
"script",
"region",
"variant",
"redundant",
"grandfathered",
]:
dic["deprecated"][label] = {}
for record in records[1:]:
fields = [field.split(": ") for field in record.strip().split("\n")]
typ = fields[0][1]
tag = fields[1][1]
if typ not in dic:
dic[typ] = {}
subfields = {}
for field in fields[2:]:
if len(field) == 2:
[key, val] = field
if key not in subfields:
subfields[key] = [val]
else: # multiple value
subfields[key].append(val)
else: # multiline field
subfields[key][-1] += " " + field[0].strip()
if (
"Deprecated" not in record
and typ == "language"
and key == "Description"
):
self.langcode[subfields[key][-1]] = tag
for key in subfields:
if len(subfields[key]) == 1: # single value
subfields[key] = subfields[key][0]
if "Deprecated" in record:
dic["deprecated"][typ][tag] = subfields
else:
dic[typ][tag] = subfields
return dic
| Convert the BCP-47 language subtag registry to a dictionary | 9 | 137 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def data_dict(self, records):
self.version = records[0].replace("File-Date:", "").strip()
dic = {}
dic["deprecated"] = {}
for label in [
"language",
"extlang",
"script",
"region",
"variant",
"redundant",
"grandfathered",
]:
dic["deprecated"][label] = {}
for record in records[1:]:
fields = [field.split(": ") for field in record.strip().split("\n")]
typ = fields[0][1]
tag = fields[1][1]
if typ not in dic:
dic[typ] = {}
subfields = {}
for field in fields[2:]:
if len(field) == 2:
[key, val] = field
if key not in subfields:
subfields[key] = [val]
else: # multiple value
subfields[key].append(val)
else: # multiline field
subfields[key][-1] += " " + field[0].strip()
if (
"Deprecated" not in record
and typ == "language"
and key == "Description"
):
self.langcode[subfields[key][-1]] = tag
for key in subfields:
if len(subfields[key]) == 1: # single value
subfields[key] = subfields[key][0]
if "Deprecated" in record:
dic["deprecated"][typ][tag] = subfields
else:
dic[typ][tag] = subfields
return dic
```
###Assistant : Convert the BCP-47 language subtag registry to a dictionary
|
1,179 | def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):
hass = MagicMock(loop=MagicMock(wraps=event_loop))
| Test that we schedule partial coros and add jobs to the job pool. | 13 | 5 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_async_add_hass_job_schedule_partial_coroutinefunction(event_loop):
hass = MagicMock(loop=MagicMock(wraps=event_loop))
```
###Assistant : Test that we schedule partial coros and add jobs to the job pool.
|
1,180 | def to_label_objs(self, answer_type="generative"):
df_labels = self.df[["id", "question", "answer_text", "answer_start", "context", "document_id"]]
record_dicts = df_labels.to_dict("records")
labels = [
Label(
query=record["question"],
answer=Answer(answer=record["answer_text"], answer_type=answer_type),
is_correct_answer=True,
is_correct_document=True,
id=record["id"],
origin=record.get("origin", "gold-label"),
document=Document(content=record.get("context"), id=str(record["document_id"])),
)
for record in record_dicts
]
return labels
| Export all labels stored in this object to haystack.Label objects | 10 | 36 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def to_label_objs(self, answer_type="generative"):
df_labels = self.df[["id", "question", "answer_text", "answer_start", "context", "document_id"]]
record_dicts = df_labels.to_dict("records")
labels = [
Label(
query=record["question"],
answer=Answer(answer=record["answer_text"], answer_type=answer_type),
is_correct_answer=True,
is_correct_document=True,
id=record["id"],
origin=record.get("origin", "gold-label"),
document=Document(content=record.get("context"), id=str(record["document_id"])),
)
for record in record_dicts
]
return labels
```
###Assistant : Export all labels stored in this object to haystack.Label objects
|
1,181 | def test_api_get_storage_path(self):
response = self.client.get("/api/storage_paths/", format="json")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 1)
resp_storage_path = response.data["results"][0]
self.assertEqual(resp_storage_path["id"], self.sp1.id)
self.assertEqual(resp_storage_path["path"], self.sp1.path)
|
GIVEN:
- API request to get all storage paths
WHEN:
- API is called
THEN:
- Existing storage paths are returned
| 21 | 19 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_api_get_storage_path(self):
response = self.client.get("/api/storage_paths/", format="json")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.data["count"], 1)
resp_storage_path = response.data["results"][0]
self.assertEqual(resp_storage_path["id"], self.sp1.id)
self.assertEqual(resp_storage_path["path"], self.sp1.path)
```
###Assistant :
GIVEN:
- API request to get all storage paths
WHEN:
- API is called
THEN:
- Existing storage paths are returned
|
1,182 | def test_normalize_metric_warning():
msg = "Normalized stress is not supported"
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
with pytest.raises(ValueError, match=msg):
mds.smacof(sim, metric=True, normalized_stress=True)
|
Test that a UserWarning is emitted when using normalized stress with
metric-MDS.
| 12 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_normalize_metric_warning():
msg = "Normalized stress is not supported"
sim = np.array([[0, 5, 3, 4], [5, 0, 2, 2], [3, 2, 0, 1], [4, 2, 1, 0]])
with pytest.raises(ValueError, match=msg):
mds.smacof(sim, metric=True, normalized_stress=True)
```
###Assistant :
Test that a UserWarning is emitted when using normalized stress with
metric-MDS.
|
1,183 | def delete_and_patch_duplicate_bins():
duplicate_bins = frappe.db.sql(, as_dict=1)
for duplicate_bin in duplicate_bins:
existing_bins = frappe.get_list("Bin",
filters={
"item_code": duplicate_bin.item_code,
"warehouse": duplicate_bin.warehouse
},
fields=["name"],
order_by="creation",)
# keep last one
existing_bins.pop()
for broken_bin in existing_bins:
frappe.delete_doc("Bin", broken_bin.name)
qty_dict = {
"reserved_qty": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"indented_qty": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"ordered_qty": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"planned_qty": get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"actual_qty": get_balance_qty_from_sle(duplicate_bin.item_code, duplicate_bin.warehouse)
}
update_bin_qty(duplicate_bin.item_code, duplicate_bin.warehouse, qty_dict)
|
SELECT
item_code, warehouse, count(*) as bin_count
FROM
tabBin
GROUP BY
item_code, warehouse
HAVING
bin_count > 1
| 16 | 54 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def delete_and_patch_duplicate_bins():
duplicate_bins = frappe.db.sql(, as_dict=1)
for duplicate_bin in duplicate_bins:
existing_bins = frappe.get_list("Bin",
filters={
"item_code": duplicate_bin.item_code,
"warehouse": duplicate_bin.warehouse
},
fields=["name"],
order_by="creation",)
# keep last one
existing_bins.pop()
for broken_bin in existing_bins:
frappe.delete_doc("Bin", broken_bin.name)
qty_dict = {
"reserved_qty": get_reserved_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"indented_qty": get_indented_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"ordered_qty": get_ordered_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"planned_qty": get_planned_qty(duplicate_bin.item_code, duplicate_bin.warehouse),
"actual_qty": get_balance_qty_from_sle(duplicate_bin.item_code, duplicate_bin.warehouse)
}
update_bin_qty(duplicate_bin.item_code, duplicate_bin.warehouse, qty_dict)
```
###Assistant :
SELECT
item_code, warehouse, count(*) as bin_count
FROM
tabBin
GROUP BY
item_code, warehouse
HAVING
bin_count > 1
|
1,184 | async def test_stop_long_running_job(job_sdk_client):
agent_client, head_client = job_sdk_client
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
driver_script =
test_script_file = path / "test_script.py"
with open(test_script_file, "w+") as file:
file.write(driver_script)
runtime_env = {"working_dir": tmp_dir}
runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logger=logger)
runtime_env = RuntimeEnv(**runtime_env).to_dict()
request = validate_request_type(
{"runtime_env": runtime_env, "entrypoint": "python test_script.py"},
JobSubmitRequest,
)
submit_result = await agent_client.submit_job_internal(request)
job_id = submit_result.submission_id
resp = await agent_client.stop_job_internal(job_id)
assert resp.stopped is True
wait_for_condition(
partial(
_check_job, client=head_client, job_id=job_id, status=JobStatus.STOPPED
),
timeout=10,
)
@pytest.mark.asyncio |
Submit a job that runs for a while and stop it in the middle.
print('Hello !')
import time
time.sleep(300) # This should never finish
raise RuntimeError('Intentionally failed.')
| 27 | 74 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
async def test_stop_long_running_job(job_sdk_client):
agent_client, head_client = job_sdk_client
with tempfile.TemporaryDirectory() as tmp_dir:
path = Path(tmp_dir)
driver_script =
test_script_file = path / "test_script.py"
with open(test_script_file, "w+") as file:
file.write(driver_script)
runtime_env = {"working_dir": tmp_dir}
runtime_env = upload_working_dir_if_needed(runtime_env, tmp_dir, logger=logger)
runtime_env = RuntimeEnv(**runtime_env).to_dict()
request = validate_request_type(
{"runtime_env": runtime_env, "entrypoint": "python test_script.py"},
JobSubmitRequest,
)
submit_result = await agent_client.submit_job_internal(request)
job_id = submit_result.submission_id
resp = await agent_client.stop_job_internal(job_id)
assert resp.stopped is True
wait_for_condition(
partial(
_check_job, client=head_client, job_id=job_id, status=JobStatus.STOPPED
),
timeout=10,
)
@pytest.mark.asyncio
```
###Assistant :
Submit a job that runs for a while and stop it in the middle.
print('Hello !')
import time
time.sleep(300) # This should never finish
raise RuntimeError('Intentionally failed.')
|
1,185 | def print_help(self):
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
help_text = f
console.print(text=help_text, menu="Stocks - Due Diligence")
| Print help[cmds]
load load a specific cryptocurrency for analysis
[param]Coin: [/param]{self.current_coin}
[param]Source: [/param]{source_txt}
[src]Glassnode[/src]
active active addresses
nonzero addresses with non-zero balances
change 30d change of supply held on exchange wallets
eb total balance held on exchanges (in percentage and units)
[src]Coinglass[/src]
oi open interest per exchange
[src]CoinPaprika[/src]
basic basic information about loaded coin
ps price and supply related metrics for loaded coin
mkt all markets for loaded coin
ex all exchanges where loaded coin is listed
twitter tweets for loaded coin
events events related to loaded coin
[src]CoinGecko[/src]
info basic information about loaded coin
market market stats about loaded coin
ath all time high related stats for loaded coin
atl all time low related stats for loaded coin
web found websites for loaded coin e.g forum, homepage
social social portals urls for loaded coin, e.g reddit, twitter
score different kind of scores for loaded coin, e.g developer score, sentiment score
dev github, bitbucket coin development statistics
bc links to blockchain explorers for loaded coin
[src]Binance[/src]
binbook show order book
balance show coin balance
[src]Coinbase[/src]
cbbook show order book
trades show last trades
stats show coin stats[/cmds]
| 187 | 20 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def print_help(self):
source_txt = CRYPTO_SOURCES.get(self.source, "?") if self.source != "" else ""
help_text = f
console.print(text=help_text, menu="Stocks - Due Diligence")
```
###Assistant : Print help[cmds]
load load a specific cryptocurrency for analysis
[param]Coin: [/param]{self.current_coin}
[param]Source: [/param]{source_txt}
[src]Glassnode[/src]
active active addresses
nonzero addresses with non-zero balances
change 30d change of supply held on exchange wallets
eb total balance held on exchanges (in percentage and units)
[src]Coinglass[/src]
oi open interest per exchange
[src]CoinPaprika[/src]
basic basic information about loaded coin
ps price and supply related metrics for loaded coin
mkt all markets for loaded coin
ex all exchanges where loaded coin is listed
twitter tweets for loaded coin
events events related to loaded coin
[src]CoinGecko[/src]
info basic information about loaded coin
market market stats about loaded coin
ath all time high related stats for loaded coin
atl all time low related stats for loaded coin
web found websites for loaded coin e.g forum, homepage
social social portals urls for loaded coin, e.g reddit, twitter
score different kind of scores for loaded coin, e.g developer score, sentiment score
dev github, bitbucket coin development statistics
bc links to blockchain explorers for loaded coin
[src]Binance[/src]
binbook show order book
balance show coin balance
[src]Coinbase[/src]
cbbook show order book
trades show last trades
stats show coin stats[/cmds]
|
1,186 | def test_get_cached_repo_files_with_all_files(self):
responses.add(
method=responses.GET,
url=f"https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1",
status=200,
json={
"tree": [
{"type": "blob", "path": "src/foo.py"},
{"type": "blob", "path": "README"},
]
},
)
repo_key = f"github:repo:{self.repo.name}:all"
assert cache.get(repo_key) is None
with mock.patch("sentry.integrations.github.client.get_jwt", return_value=b"jwt_token_1"):
files = self.client.get_cached_repo_files(self.repo.name, "master")
assert files == ["src/foo.py"]
| Fetch files for repo. All files rather than just source code files | 12 | 38 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_get_cached_repo_files_with_all_files(self):
responses.add(
method=responses.GET,
url=f"https://api.github.com/repos/{self.repo.name}/git/trees/master?recursive=1",
status=200,
json={
"tree": [
{"type": "blob", "path": "src/foo.py"},
{"type": "blob", "path": "README"},
]
},
)
repo_key = f"github:repo:{self.repo.name}:all"
assert cache.get(repo_key) is None
with mock.patch("sentry.integrations.github.client.get_jwt", return_value=b"jwt_token_1"):
files = self.client.get_cached_repo_files(self.repo.name, "master")
assert files == ["src/foo.py"]
```
###Assistant : Fetch files for repo. All files rather than just source code files
|
1,187 | def check_migrations(self):
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(
{migration.app_label for migration, backwards in plan}
)
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s."
% {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(
self.style.NOTICE("Run 'python manage.py migrate' to apply them.")
)
|
Print a warning if the set of migrations on disk doesn't match the
migrations in the database.
| 17 | 79 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def check_migrations(self):
from django.db.migrations.executor import MigrationExecutor
try:
executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
except ImproperlyConfigured:
# No databases are configured (or the dummy one)
return
plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
if plan:
apps_waiting_migration = sorted(
{migration.app_label for migration, backwards in plan}
)
self.stdout.write(
self.style.NOTICE(
"\nYou have %(unapplied_migration_count)s unapplied migration(s). "
"Your project may not work properly until you apply the "
"migrations for app(s): %(apps_waiting_migration)s."
% {
"unapplied_migration_count": len(plan),
"apps_waiting_migration": ", ".join(apps_waiting_migration),
}
)
)
self.stdout.write(
self.style.NOTICE("Run 'python manage.py migrate' to apply them.")
)
```
###Assistant :
Print a warning if the set of migrations on disk doesn't match the
migrations in the database.
|
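A minimal standalone sketch of the same check, assuming Django settings are already configured (e.g. inside a management command or after `django.setup()`); the helper name is illustrative:
```Python
from django.db import DEFAULT_DB_ALIAS, connections
from django.db.migrations.executor import MigrationExecutor


def apps_with_unapplied_migrations():
    # Build the plan from the current database state to the latest migrations.
    executor = MigrationExecutor(connections[DEFAULT_DB_ALIAS])
    plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
    # App labels that still have pending migrations.
    return sorted({migration.app_label for migration, _backwards in plan})
```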
1,188 | def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
if not is_chordal(G):
raise nx.NetworkXError("Input graph is not chordal.")
H = nx.Graph(G)
H.add_edge(s, t)
induced_nodes = set()
triplet = _find_chordality_breaker(H, s, treewidth_bound)
while triplet:
(u, v, w) = triplet
induced_nodes.update(triplet)
for n in triplet:
if n != s:
H.add_edge(s, n)
triplet = _find_chordality_breaker(H, s, treewidth_bound)
if induced_nodes:
# Add t and the second node in the induced path from s to t.
induced_nodes.add(t)
for u in G[s]:
if len(induced_nodes & set(G[u])) == 2:
induced_nodes.add(u)
break
return induced_nodes
| Returns the set of induced nodes in the path from s to t.
Parameters
----------
G : graph
A chordal NetworkX graph
s : node
Source node to look for induced nodes
t : node
Destination node to look for induced nodes
treewidth_bound: float
Maximum treewidth acceptable for the graph H. The search
for induced nodes will end as soon as the treewidth_bound is exceeded.
Returns
-------
induced_nodes : Set of nodes
The set of induced nodes in the path from s to t in G
Raises
------
NetworkXError
The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
If the input graph is an instance of one of these classes, a
:exc:`NetworkXError` is raised.
The algorithm can only be applied to chordal graphs. If the input
graph is found to be non-chordal, a :exc:`NetworkXError` is raised.
Examples
--------
>>> G = nx.Graph()
>>> G = nx.generators.classic.path_graph(10)
>>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
>>> sorted(induced_nodes)
[1, 2, 3, 4, 5, 6, 7, 8, 9]
Notes
-----
G must be a chordal graph and (s,t) an edge that is not in G.
If a treewidth_bound is provided, the search for induced nodes will end
as soon as the treewidth_bound is exceeded.
The algorithm is inspired by Algorithm 4 in [1]_.
A formal definition of induced node can also be found on that reference.
References
----------
.. [1] Learning Bounded Treewidth Bayesian Networks.
Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
| 239 | 82 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def find_induced_nodes(G, s, t, treewidth_bound=sys.maxsize):
if not is_chordal(G):
raise nx.NetworkXError("Input graph is not chordal.")
H = nx.Graph(G)
H.add_edge(s, t)
induced_nodes = set()
triplet = _find_chordality_breaker(H, s, treewidth_bound)
while triplet:
(u, v, w) = triplet
induced_nodes.update(triplet)
for n in triplet:
if n != s:
H.add_edge(s, n)
triplet = _find_chordality_breaker(H, s, treewidth_bound)
if induced_nodes:
# Add t and the second node in the induced path from s to t.
induced_nodes.add(t)
for u in G[s]:
if len(induced_nodes & set(G[u])) == 2:
induced_nodes.add(u)
break
return induced_nodes
```
###Assistant : Returns the set of induced nodes in the path from s to t.
Parameters
----------
G : graph
A chordal NetworkX graph
s : node
Source node to look for induced nodes
t : node
Destination node to look for induced nodes
treewidth_bound: float
Maximum treewidth acceptable for the graph H. The search
for induced nodes will end as soon as the treewidth_bound is exceeded.
Returns
-------
induced_nodes : Set of nodes
The set of induced nodes in the path from s to t in G
Raises
------
NetworkXError
The algorithm does not support DiGraph, MultiGraph and MultiDiGraph.
If the input graph is an instance of one of these classes, a
:exc:`NetworkXError` is raised.
The algorithm can only be applied to chordal graphs. If the input
graph is found to be non-chordal, a :exc:`NetworkXError` is raised.
Examples
--------
>>> G = nx.Graph()
>>> G = nx.generators.classic.path_graph(10)
>>> induced_nodes = nx.find_induced_nodes(G, 1, 9, 2)
>>> sorted(induced_nodes)
[1, 2, 3, 4, 5, 6, 7, 8, 9]
Notes
-----
G must be a chordal graph and (s,t) an edge that is not in G.
If a treewidth_bound is provided, the search for induced nodes will end
as soon as the treewidth_bound is exceeded.
The algorithm is inspired by Algorithm 4 in [1]_.
A formal definition of induced node can also be found on that reference.
References
----------
.. [1] Learning Bounded Treewidth Bayesian Networks.
Gal Elidan, Stephen Gould; JMLR, 9(Dec):2699--2731, 2008.
http://jmlr.csail.mit.edu/papers/volume9/elidan08a/elidan08a.pdf
|
1,189 | def any_skipna_inferred_dtype(request):
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture |
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> from pandas._libs import lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
| 100 | 33 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def any_skipna_inferred_dtype(request):
inferred_dtype, values = request.param
values = np.array(values, dtype=object) # object dtype to avoid casting
# correctness of inference tested in tests/dtypes/test_inference.py
return inferred_dtype, values
# ----------------------------------------------------------------
# Misc
# ----------------------------------------------------------------
@pytest.fixture
```
###Assistant :
Fixture for all inferred dtypes from _libs.lib.infer_dtype
The covered (inferred) types are:
* 'string'
* 'empty'
* 'bytes'
* 'mixed'
* 'mixed-integer'
* 'mixed-integer-float'
* 'floating'
* 'integer'
* 'decimal'
* 'boolean'
* 'datetime64'
* 'datetime'
* 'date'
* 'timedelta'
* 'time'
* 'period'
* 'interval'
Returns
-------
inferred_dtype : str
The string for the inferred dtype from _libs.lib.infer_dtype
values : np.ndarray
An array of object dtype that will be inferred to have
`inferred_dtype`
Examples
--------
>>> from pandas._libs import lib
>>>
>>> def test_something(any_skipna_inferred_dtype):
... inferred_dtype, values = any_skipna_inferred_dtype
... # will pass
... assert lib.infer_dtype(values, skipna=True) == inferred_dtype
|
1,190 | def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
r
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
#----------------------------------------------------------------------------
| Downsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and downsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the downsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
| 181 | 43 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def downsample_2d(x, k=None, factor=2, gain=1, data_format='NCHW', impl='cuda'):
r
assert isinstance(factor, int) and factor >= 1
if k is None:
k = [1] * factor
k = _setup_kernel(k) * gain
p = k.shape[0] - factor
return _simple_upfirdn_2d(x, k, down=factor, pad0=(p+1)//2, pad1=p//2, data_format=data_format, impl=impl)
#----------------------------------------------------------------------------
```
###Assistant : Downsample a batch of 2D images with the given filter.
Accepts a batch of 2D images of the shape `[N, C, H, W]` or `[N, H, W, C]`
and downsamples each image with the given filter. The filter is normalized so that
if the input pixels are constant, they will be scaled by the specified `gain`.
Pixels outside the image are assumed to be zero, and the filter is padded with
zeros so that its shape is a multiple of the downsampling factor.
Args:
x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`.
k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable).
The default is `[1] * factor`, which corresponds to average pooling.
factor: Integer downsampling factor (default: 2).
gain: Scaling factor for signal magnitude (default: 1.0).
data_format: `'NCHW'` or `'NHWC'` (default: `'NCHW'`).
impl: Name of the implementation to use. Can be `"ref"` or `"cuda"` (default).
Returns:
Tensor of the shape `[N, C, H // factor, W // factor]` or
`[N, H // factor, W // factor, C]`, and same datatype as `x`.
|
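A NumPy reference sketch of the default case above (k=None, i.e. average pooling), not the TF/CUDA op itself; it assumes NCHW input whose height and width are divisible by `factor`:
```Python
import numpy as np


def downsample_2d_reference(x, factor=2, gain=1.0):
    # With k = [1] * factor the normalized filter is a constant box filter,
    # so factor-strided filtering reduces to block averaging scaled by `gain`.
    n, c, h, w = x.shape
    blocks = x.reshape(n, c, h // factor, factor, w // factor, factor)
    return gain * blocks.mean(axis=(3, 5))


x = np.random.rand(1, 3, 8, 8).astype(np.float32)
y = downsample_2d_reference(x)  # shape (1, 3, 4, 4)
```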
1,191 | def _check_deprecated_resample_kwargs(kwargs, origin):
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
|
Check for use of deprecated parameters in ``resample`` and related functions.
Raises the appropriate warnings if these parameters are detected.
Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).
Parameters
----------
kwargs : dict
Dictionary of keyword arguments to check for deprecated parameters.
origin : object
From where this function is being called; either Grouper or TimeGrouper. Used
to determine an approximate stacklevel.
| 65 | 136 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _check_deprecated_resample_kwargs(kwargs, origin):
# Deprecation warning of `base` and `loffset` since v1.1.0:
# we are raising the warning here to be able to set the `stacklevel`
# properly since we need to raise the `base` and `loffset` deprecation
# warning from three different cases:
# core/generic.py::NDFrame.resample
# core/groupby/groupby.py::GroupBy.resample
# core/groupby/grouper.py::Grouper
# raising these warnings from TimeGrouper directly would fail the test:
# tests/resample/test_deprecated.py::test_deprecating_on_loffset_and_base
if kwargs.get("base", None) is not None:
warnings.warn(
"'base' in .resample() and in Grouper() is deprecated.\n"
"The new arguments that you should use are 'offset' or 'origin'.\n"
'\n>>> df.resample(freq="3s", base=2)\n'
"\nbecomes:\n"
'\n>>> df.resample(freq="3s", offset="2s")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
if kwargs.get("loffset", None) is not None:
warnings.warn(
"'loffset' in .resample() and in Grouper() is deprecated.\n"
'\n>>> df.resample(freq="3s", loffset="8H")\n'
"\nbecomes:\n"
"\n>>> from pandas.tseries.frequencies import to_offset"
'\n>>> df = df.resample(freq="3s").mean()'
'\n>>> df.index = df.index.to_timestamp() + to_offset("8H")\n',
FutureWarning,
stacklevel=find_stack_level(inspect.currentframe()),
)
```
###Assistant :
Check for use of deprecated parameters in ``resample`` and related functions.
Raises the appropriate warnings if these parameters are detected.
Only sets an approximate ``stacklevel`` for the warnings (see #37603, #36629).
Parameters
----------
kwargs : dict
Dictionary of keyword arguments to check for deprecated parameters.
origin : object
From where this function is being called; either Grouper or TimeGrouper. Used
to determine an approximate stacklevel.
|
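A small sketch of the migration those warnings describe, on a toy frame (the frequency strings and the 2s shift are illustrative):
```Python
import pandas as pd
from pandas.tseries.frequencies import to_offset

idx = pd.date_range("2021-01-01", periods=9, freq="s")
df = pd.DataFrame({"v": range(9)}, index=idx)

# Instead of the deprecated df.resample("3s", base=2), anchor with `offset`:
by_offset = df.resample("3s", offset="2s").mean()

# Instead of the deprecated loffset="2s", shift the resulting index yourself:
shifted = df.resample("3s").mean()
shifted.index = shifted.index + to_offset("2s")
```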
1,192 | def get_conn(self) -> DataCatalogClient:
if not self._client:
self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
| Retrieves client library object that allows access to Cloud Data Catalog service. | 12 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_conn(self) -> DataCatalogClient:
if not self._client:
self._client = DataCatalogClient(credentials=self._get_credentials(), client_info=CLIENT_INFO)
return self._client
```
###Assistant : Retrieves client library object that allows access to Cloud Data Catalog service.
|
1,193 | def get_api_client(self) -> ApiClient:
try:
return new_client_from_config_dict(
config_dict=self.config, context=self.context
)
except ConfigException:
raise
|
Returns an instance of the kubernetes api client with a specific context
| 12 | 13 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def get_api_client(self) -> ApiClient:
try:
return new_client_from_config_dict(
config_dict=self.config, context=self.context
)
except ConfigException:
raise
```
###Assistant :
Returns an instance of the kubernetes api client with a specific context
|
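A hedged sketch of building an `ApiClient` from an in-memory kubeconfig with the official `kubernetes` package; the kubeconfig contents below are illustrative:
```Python
from kubernetes.config import new_client_from_config_dict
from kubernetes.config.config_exception import ConfigException

kubeconfig = {
    "apiVersion": "v1",
    "kind": "Config",
    "clusters": [
        {
            "name": "demo",
            "cluster": {"server": "https://127.0.0.1:6443", "insecure-skip-tls-verify": True},
        }
    ],
    "users": [{"name": "demo", "user": {"token": "dummy-token"}}],
    "contexts": [{"name": "demo", "context": {"cluster": "demo", "user": "demo"}}],
    "current-context": "demo",
}

try:
    api_client = new_client_from_config_dict(config_dict=kubeconfig, context="demo")
except ConfigException as exc:
    print(f"Invalid kubeconfig: {exc}")
```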
1,194 | def cuts(self) -> list[list[int]]:
if self._cuts is not None:
return self._cuts
width = self.width
height = self.height
screen_region = Region(0, 0, width, height)
cuts_sets = [{0, width} for _ in range(height)]
if self.map is not None:
for region, order, clip in self.map.values():
region = region.intersection(clip)
if region and (region in screen_region):
region_cuts = region.x_extents
for y in region.y_range:
cuts_sets[y].update(region_cuts)
# Sort the cuts for each line
self._cuts = [sorted(cut_set) for cut_set in cuts_sets]
return self._cuts
| Get vertical cuts.
A cut is every point on a line where a widget starts or ends.
Returns:
list[list[int]]: A list of cuts for every line.
| 26 | 75 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def cuts(self) -> list[list[int]]:
if self._cuts is not None:
return self._cuts
width = self.width
height = self.height
screen_region = Region(0, 0, width, height)
cuts_sets = [{0, width} for _ in range(height)]
if self.map is not None:
for region, order, clip in self.map.values():
region = region.intersection(clip)
if region and (region in screen_region):
region_cuts = region.x_extents
for y in region.y_range:
cuts_sets[y].update(region_cuts)
# Sort the cuts for each line
self._cuts = [sorted(cut_set) for cut_set in cuts_sets]
return self._cuts
```
###Assistant : Get vertical cuts.
A cut is every point on a line where a widget starts or ends.
Returns:
list[list[int]]: A list of cuts for every line.
|
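A self-contained illustration of the idea, without the Region/compositor machinery: every left and right edge of a widget on a line becomes a cut (the box coordinates are illustrative):
```Python
width, height = 10, 2
# (x, y, w, h) boxes standing in for widget regions.
regions = [(2, 0, 4, 1), (6, 0, 3, 2)]

cuts_sets = [{0, width} for _ in range(height)]
for x, y, w, h in regions:
    for line in range(y, y + h):
        cuts_sets[line].update((x, x + w))

cuts = [sorted(s) for s in cuts_sets]
# cuts == [[0, 2, 6, 9, 10], [0, 6, 9, 10]]
```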
1,195 | def is_subclassed(layer):
return (
layer.__module__.find("keras.engine") == -1
and layer.__module__.find("keras.layers") == -1
)
| Returns True if the object is a subclassed layer or subclassed model. | 12 | 12 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def is_subclassed(layer):
return (
layer.__module__.find("keras.engine") == -1
and layer.__module__.find("keras.layers") == -1
)
```
###Assistant : Returns True if the object is a subclassed layer or subclassed model.
|
1,196 | def _mat(self):
sympy_deprecation_warning(
,
deprecated_since_version="1.9",
active_deprecations_target="deprecated-private-matrix-attributes"
)
return self.flat()
|
The private _mat attribute of Matrix is deprecated. Use the
.flat() method instead.
| 13 | 9 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def _mat(self):
sympy_deprecation_warning(
,
deprecated_since_version="1.9",
active_deprecations_target="deprecated-private-matrix-attributes"
)
return self.flat()
```
###Assistant :
The private _mat attribute of Matrix is deprecated. Use the
.flat() method instead.
|
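A two-line sketch of the documented replacement (`.flat()` returns the matrix elements as a flat, row-major list):
```Python
from sympy import Matrix

m = Matrix([[1, 2], [3, 4]])
assert m.flat() == [1, 2, 3, 4]  # public replacement for the deprecated _mat
```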
1,197 | def test_missing_required_field(self):
cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)
cf3.save()
cf3.content_types.set([ContentType.objects.get_for_model(Site)])
site = Site(name='Test Site', slug='test-site')
# Set custom field data with a required field omitted
site.custom_field_data['foo'] = 'abc'
with self.assertRaises(ValidationError):
site.clean()
site.custom_field_data['baz'] = 'def'
site.clean()
|
Check that a ValidationError is raised if any required custom fields are not present.
| 14 | 34 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_missing_required_field(self):
cf3 = CustomField(type=CustomFieldTypeChoices.TYPE_TEXT, name='baz', required=True)
cf3.save()
cf3.content_types.set([ContentType.objects.get_for_model(Site)])
site = Site(name='Test Site', slug='test-site')
# Set custom field data with a required field omitted
site.custom_field_data['foo'] = 'abc'
with self.assertRaises(ValidationError):
site.clean()
site.custom_field_data['baz'] = 'def'
site.clean()
```
###Assistant :
Check that a ValidationError is raised if any required custom fields are not present.
|
1,198 | def test_basic(self):
context = Context({})
template =
expected =
self.assertHTMLEqual(expected, Template(template).render(context))
|
{% load wagtailadmin_tags %}
{% fragment as my_fragment %}
<p>Hello, World</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
Text coming after:
<p>Hello, World</p>
| 25 | 11 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def test_basic(self):
context = Context({})
template =
expected =
self.assertHTMLEqual(expected, Template(template).render(context))
```
###Assistant :
{% load wagtailadmin_tags %}
{% fragment as my_fragment %}
<p>Hello, World</p>
{% endfragment %}
Text coming after:
{{ my_fragment }}
Text coming after:
<p>Hello, World</p>
|
1,199 | def to_dense(self) -> Series:
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
|
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
| 54 | 14 | Python |
###User : Below is a Python method which does a task. Create a documentation for the below code :
```Python
def to_dense(self) -> Series:
from pandas import Series
return Series(
self._parent.array.to_dense(),
index=self._parent.index,
name=self._parent.name,
)
```
###Assistant :
Convert a Series from sparse values to dense.
.. versionadded:: 0.25.0
Returns
-------
Series:
A Series with the same values, stored as a dense array.
Examples
--------
>>> series = pd.Series(pd.arrays.SparseArray([0, 1, 0]))
>>> series
0 0
1 1
2 0
dtype: Sparse[int64, 0]
>>> series.sparse.to_dense()
0 0
1 1
2 0
dtype: int64
|