Dataset schema (column, type, observed range):

    Unnamed: 0   int64    0 to 2.93k
    code         string   length 101 to 62.2k
    docs         string   length 51 to 10.7k
    doc_len      int64    4 to 1.74k
    words        int64    4 to 4.82k
    lang         string   1 class
    prompt       string   length 320 to 71.2k
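Assuming the dump above is also available as a flat CSV export with exactly these columns (the filename in the sketch is an assumption, not something stated in the dump), it can be reloaded and sanity-checked with pandas:

```Python
# Hedged sketch: reload a CSV export of this dump and confirm it matches the
# schema listed above. The filename "code_docs_dump.csv" is assumed.
import pandas as pd

df = pd.read_csv("code_docs_dump.csv")
print(df.columns.tolist())       # ['Unnamed: 0', 'code', 'docs', 'doc_len', 'words', 'lang', 'prompt']
print(df["lang"].unique())       # a single language class (Python)
print(df["doc_len"].describe())  # doc lengths roughly between 4 and 1.74k
```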
1,200
def replaceHTMLEntity(t): return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") "Comment of the form ``/* ... */``" htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") "Comment of the form ``<!-- ... -->``" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form ``// ... (to end of line)``" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") "Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" javaStyleComment = cppStyleComment "Same as :class:`cppStyleComment`" pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form ``# ... (to end of line)``" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional(Word(" \t") + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") # some other useful expressions - using lower-case class name since we are really using this as a namespace
Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
34
141
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def replaceHTMLEntity(t): return _htmlEntityMap.get(t.entity) # it's easy to get these comment structures wrong - they're very common, so may as well make them available cStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/').setName("C style comment") "Comment of the form ``/* ... */``" htmlComment = Regex(r"<!--[\s\S]*?-->").setName("HTML comment") "Comment of the form ``<!-- ... -->``" restOfLine = Regex(r".*").leaveWhitespace().setName("rest of line") dblSlashComment = Regex(r"//(?:\\\n|[^\n])*").setName("// comment") "Comment of the form ``// ... (to end of line)``" cppStyleComment = Combine(Regex(r"/\*(?:[^*]|\*(?!/))*") + '*/' | dblSlashComment).setName("C++ style comment") "Comment of either form :class:`cStyleComment` or :class:`dblSlashComment`" javaStyleComment = cppStyleComment "Same as :class:`cppStyleComment`" pythonStyleComment = Regex(r"#.*").setName("Python style comment") "Comment of the form ``# ... (to end of line)``" _commasepitem = Combine(OneOrMore(Word(printables, excludeChars=',') + Optional(Word(" \t") + ~Literal(",") + ~LineEnd()))).streamline().setName("commaItem") commaSeparatedList = delimitedList(Optional(quotedString.copy() | _commasepitem, default="")).setName("commaSeparatedList") # some other useful expressions - using lower-case class name since we are really using this as a namespace ``` ###Assistant : Helper parser action to replace common HTML entities with their special characters(Deprecated) Predefined expression of 1 or more printable words or quoted strings, separated by commas. This expression is deprecated in favor of :class:`pyparsing_common.comma_separated_list`.
1,201
def read_csv_with_nan(path, nan_percent=0.0): df = pd.read_csv(path) if nan_percent > 0: num_rows = len(df) for col in df.columns: for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))): df[col].iloc[row] = np.nan return df
Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.
14
29
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def read_csv_with_nan(path, nan_percent=0.0): df = pd.read_csv(path) if nan_percent > 0: num_rows = len(df) for col in df.columns: for row in random.sample(range(num_rows), int(round(nan_percent * num_rows))): df[col].iloc[row] = np.nan return df ``` ###Assistant : Converts `nan_percent` of samples in each row of the CSV at `path` to NaNs.
1,202
def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) if self.use_conv: convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 inC = w.shape[1] num_groups = x.shape[1] // inC # Transpose weights. w = paddle.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4]) w = paddle.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0) x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) else: p = k.shape[0] - factor x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) return x
Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
139
219
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _upsample_2d(self, x, w=None, k=None, factor=2, gain=1): assert isinstance(factor, int) and factor >= 1 # Setup filter kernel. if k is None: k = [1] * factor # setup kernel k = np.asarray(k, dtype=np.float32) if k.ndim == 1: k = np.outer(k, k) k /= np.sum(k) k = k * (gain * (factor**2)) if self.use_conv: convH = w.shape[2] convW = w.shape[3] inC = w.shape[1] p = (k.shape[0] - factor) - (convW - 1) stride = (factor, factor) # Determine data dimensions. stride = [1, 1, factor, factor] output_shape = ((x.shape[2] - 1) * factor + convH, (x.shape[3] - 1) * factor + convW) output_padding = ( output_shape[0] - (x.shape[2] - 1) * stride[0] - convH, output_shape[1] - (x.shape[3] - 1) * stride[1] - convW, ) assert output_padding[0] >= 0 and output_padding[1] >= 0 inC = w.shape[1] num_groups = x.shape[1] // inC # Transpose weights. w = paddle.reshape(w, (num_groups, -1, inC, convH, convW)) w = w[..., ::-1, ::-1].transpose([0, 2, 1, 3, 4]) w = paddle.reshape(w, (num_groups * inC, -1, convH, convW)) x = F.conv2d_transpose(x, w, stride=stride, output_padding=output_padding, padding=0) x = upfirdn2d_native(x, paddle.to_tensor(k), pad=((p + 1) // 2 + factor - 1, p // 2 + 1)) else: p = k.shape[0] - factor x = upfirdn2d_native(x, paddle.to_tensor(k), up=factor, pad=((p + 1) // 2 + factor - 1, p // 2)) return x ``` ###Assistant : Fused `upsample_2d()` followed by `Conv2d()`. Args: Padding is performed only once at the beginning, not between the operations. The fused op is considerably more efficient than performing the same calculation using standard TensorFlow ops. It supports gradients of arbitrary: order. x: Input tensor of the shape `[N, C, H, W]` or `[N, H, W, C]`. w: Weight tensor of the shape `[filterH, filterW, inChannels, outChannels]`. Grouped convolution can be performed by `inChannels = x.shape[0] // numGroups`. k: FIR filter of the shape `[firH, firW]` or `[firN]` (separable). The default is `[1] * factor`, which corresponds to nearest-neighbor upsampling. factor: Integer upsampling factor (default: 2). gain: Scaling factor for signal magnitude (default: 1.0). Returns: Tensor of the shape `[N, C, H * factor, W * factor]` or `[N, H * factor, W * factor, C]`, and same datatype as `x`.
1,203
def get_network_names(self) -> t.Optional[t.List[str]]: if self.networks is None: return None return sorted(self.networks)
Return a list of the network names the container is attached to.
12
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_network_names(self) -> t.Optional[t.List[str]]: if self.networks is None: return None return sorted(self.networks) ``` ###Assistant : Return a list of the network names the container is attached to.
1,204
def get_delivered_items_cost(): dn_items = frappe.db.sql( , as_dict=1, ) si_items = frappe.db.sql( , as_dict=1, ) dn_item_map = {} for item in dn_items: dn_item_map.setdefault(item.project, item.amount) for item in si_items: dn_item_map.setdefault(item.project, item.amount) return dn_item_map
select dn.project, sum(dn_item.base_net_amount) as amount from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != '' group by dn.projectselect si.project, sum(si_item.base_net_amount) as amount from `tabSales Invoice` si, `tabSales Invoice Item` si_item where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1 and si.is_pos = 1 and ifnull(si.project, '') != '' group by si.project
65
31
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_delivered_items_cost(): dn_items = frappe.db.sql( , as_dict=1, ) si_items = frappe.db.sql( , as_dict=1, ) dn_item_map = {} for item in dn_items: dn_item_map.setdefault(item.project, item.amount) for item in si_items: dn_item_map.setdefault(item.project, item.amount) return dn_item_map ``` ###Assistant : select dn.project, sum(dn_item.base_net_amount) as amount from `tabDelivery Note` dn, `tabDelivery Note Item` dn_item where dn.name = dn_item.parent and dn.docstatus = 1 and ifnull(dn.project, '') != '' group by dn.projectselect si.project, sum(si_item.base_net_amount) as amount from `tabSales Invoice` si, `tabSales Invoice Item` si_item where si.name = si_item.parent and si.docstatus = 1 and si.update_stock = 1 and si.is_pos = 1 and ifnull(si.project, '') != '' group by si.project
1,205
def __new__(cls, *args, **kw_args): is_canon_bp = kw_args.get('is_canon_bp', False) args = list(map(_sympify, args)) free = [get_free_indices(arg) for arg in args] free = set(itertools.chain(*free)) #flatten free newargs = [] for arg in args: dum_this = set(get_dummy_indices(arg)) dum_other = [get_dummy_indices(a) for a in newargs] dum_other = set(itertools.chain(*dum_other)) #flatten dum_other free_this = set(get_free_indices(arg)) if len(dum_this.intersection(free)) > 0: exclude = free_this.union(free, dum_other) newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure) else: newarg = arg newargs.append(newarg) args = newargs # Flatten: args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = TensExpr.__new__(cls, *args) obj._indices = indices obj._index_types = index_types[:] obj._index_structure = index_structure obj._free = index_structure.free[:] obj._dum = index_structure.dum[:] obj._free_indices = {x[0] for x in obj.free} obj._rank = len(obj.free) obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = S.One obj._is_canon_bp = is_canon_bp return obj index_types = property(lambda self: self._index_types) free = property(lambda self: self._free) dum = property(lambda self: self._dum) free_indices = property(lambda self: self._free_indices) rank = property(lambda self: self._rank) ext_rank = property(lambda self: self._ext_rank)
If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices.
25
186
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def __new__(cls, *args, **kw_args): is_canon_bp = kw_args.get('is_canon_bp', False) args = list(map(_sympify, args)) free = [get_free_indices(arg) for arg in args] free = set(itertools.chain(*free)) #flatten free newargs = [] for arg in args: dum_this = set(get_dummy_indices(arg)) dum_other = [get_dummy_indices(a) for a in newargs] dum_other = set(itertools.chain(*dum_other)) #flatten dum_other free_this = set(get_free_indices(arg)) if len(dum_this.intersection(free)) > 0: exclude = free_this.union(free, dum_other) newarg = TensMul._dedupe_indices(arg, exclude, arg._index_structure) else: newarg = arg newargs.append(newarg) args = newargs # Flatten: args = [i for arg in args for i in (arg.args if isinstance(arg, (TensMul, Mul)) else [arg])] args, indices, free, dum = TensMul._tensMul_contract_indices(args, replace_indices=False) # Data for indices: index_types = [i.tensor_index_type for i in indices] index_structure = _IndexStructure(free, dum, index_types, indices, canon_bp=is_canon_bp) obj = TensExpr.__new__(cls, *args) obj._indices = indices obj._index_types = index_types[:] obj._index_structure = index_structure obj._free = index_structure.free[:] obj._dum = index_structure.dum[:] obj._free_indices = {x[0] for x in obj.free} obj._rank = len(obj.free) obj._ext_rank = len(obj._index_structure.free) + 2*len(obj._index_structure.dum) obj._coeff = S.One obj._is_canon_bp = is_canon_bp return obj index_types = property(lambda self: self._index_types) free = property(lambda self: self._free) dum = property(lambda self: self._dum) free_indices = property(lambda self: self._free_indices) rank = property(lambda self: self._rank) ext_rank = property(lambda self: self._ext_rank) ``` ###Assistant : If the internal dummy indices in one arg conflict with the free indices of the remaining args, we need to rename those internal dummy indices.
1,206
def test_context_for_crash_rate_alert(self): status = TriggerStatus.ACTIVE incident = self.create_incident() alert_rule = self.create_alert_rule( aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate" ) alert_rule_trigger = self.create_alert_rule_trigger(alert_rule) action = self.create_alert_rule_trigger_action( alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident ) assert ( generate_incident_trigger_email_context( self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL )["aggregate"] == "percentage(sessions_crashed, sessions)" )
Test that ensures the metric name for Crash rate alerts excludes the alias
13
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_context_for_crash_rate_alert(self): status = TriggerStatus.ACTIVE incident = self.create_incident() alert_rule = self.create_alert_rule( aggregate="percentage(sessions_crashed, sessions) AS _crash_rate_alert_aggregate" ) alert_rule_trigger = self.create_alert_rule_trigger(alert_rule) action = self.create_alert_rule_trigger_action( alert_rule_trigger=alert_rule_trigger, triggered_for_incident=incident ) assert ( generate_incident_trigger_email_context( self.project, incident, action.alert_rule_trigger, status, IncidentStatus.CRITICAL )["aggregate"] == "percentage(sessions_crashed, sessions)" ) ``` ###Assistant : Test that ensures the metric name for Crash rate alerts excludes the alias
1,207
def get_template(self, template_name, skip=None): tried = [] for origin in self.get_template_sources(template_name): if skip is not None and origin in skip: tried.append((origin, "Skipped to avoid recursion")) continue try: contents = self.get_contents(origin) except TemplateDoesNotExist: tried.append((origin, "Source does not exist")) continue else: return Template( contents, origin, origin.template_name, self.engine, ) raise TemplateDoesNotExist(template_name, tried=tried)
Call self.get_template_sources() and return a Template object for the first template matching template_name. If skip is provided, ignore template origins in skip. This is used to avoid recursion during template extending.
31
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_template(self, template_name, skip=None): tried = [] for origin in self.get_template_sources(template_name): if skip is not None and origin in skip: tried.append((origin, "Skipped to avoid recursion")) continue try: contents = self.get_contents(origin) except TemplateDoesNotExist: tried.append((origin, "Source does not exist")) continue else: return Template( contents, origin, origin.template_name, self.engine, ) raise TemplateDoesNotExist(template_name, tried=tried) ``` ###Assistant : Call self.get_template_sources() and return a Template object for the first template matching template_name. If skip is provided, ignore template origins in skip. This is used to avoid recursion during template extending.
1,208
def from_config(cls, config): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"] ) return cls(**config) base_optimizer_keyword_args = # pylint: disable=g-classes-have-attributes @keras_export("keras.optimizers.experimental.Optimizer", v1=[])
Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. Returns: An optimizer instance. name: String. The name to use for momentum accumulator weights created by the optimizer. clipnorm: Float. If set, the gradient of each weight is individually clipped so that its norm is no higher than this value. clipvalue: Float. If set, the gradient of each weight is clipped to be no higher than this value. global_clipnorm: Float. If set, the gradient of all weights is clipped so that their global norm is no higher than this value. use_ema: Boolean, defaults to False. If True, exponential moving average (EMA) is applied. EMA consists of computing an exponential moving average of the weights of the model (as the weight values change after each training batch), and periodically overwriting the weights with their moving average. ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is the momentum to use when computing the EMA of the model's weights: `new_average = ema_momentum * old_average + (1 - ema_momentum) * current_variable_value`. ema_overwrite_frequency: Int or None, defaults to None. Only used if `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we overwrite the model variable by its moving average. If None, the optimizer does not overwrite model variables in the middle of training, and you need to explicitly overwrite the variables at the end of training by calling `optimizer.finalize_variable_values()` (which updates the model variables in-place). When using the built-in `fit()` training loop, this happens automatically after the last epoch, and you don't need to do anything. jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA compilation. If no GPU device is found, this flag will be ignored. **kwargs: keyword arguments only used for backward compatibility.
306
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def from_config(cls, config): if "learning_rate" in config: if isinstance(config["learning_rate"], dict): config["learning_rate"] = learning_rate_schedule.deserialize( config["learning_rate"] ) return cls(**config) base_optimizer_keyword_args = # pylint: disable=g-classes-have-attributes @keras_export("keras.optimizers.experimental.Optimizer", v1=[]) ``` ###Assistant : Creates an optimizer from its config. This method is the reverse of `get_config`, capable of instantiating the same optimizer from the config dictionary. Args: config: A Python dictionary, typically the output of get_config. Returns: An optimizer instance. name: String. The name to use for momentum accumulator weights created by the optimizer. clipnorm: Float. If set, the gradient of each weight is individually clipped so that its norm is no higher than this value. clipvalue: Float. If set, the gradient of each weight is clipped to be no higher than this value. global_clipnorm: Float. If set, the gradient of all weights is clipped so that their global norm is no higher than this value. use_ema: Boolean, defaults to False. If True, exponential moving average (EMA) is applied. EMA consists of computing an exponential moving average of the weights of the model (as the weight values change after each training batch), and periodically overwriting the weights with their moving average. ema_momentum: Float, defaults to 0.99. Only used if `use_ema=True`. This is the momentum to use when computing the EMA of the model's weights: `new_average = ema_momentum * old_average + (1 - ema_momentum) * current_variable_value`. ema_overwrite_frequency: Int or None, defaults to None. Only used if `use_ema=True`. Every `ema_overwrite_frequency` steps of iterations, we overwrite the model variable by its moving average. If None, the optimizer does not overwrite model variables in the middle of training, and you need to explicitly overwrite the variables at the end of training by calling `optimizer.finalize_variable_values()` (which updates the model variables in-place). When using the built-in `fit()` training loop, this happens automatically after the last epoch, and you don't need to do anything. jit_compile: Boolean, defaults to True. If True, the optimizer will use XLA compilation. If no GPU device is found, this flag will be ignored. **kwargs: keyword arguments only used for backward compatibility.
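The classmethod in the row above restores a serialized learning-rate schedule before instantiating the optimizer. A hedged sketch of the round trip it enables, written against the public Keras API (the choice of Adam and ExponentialDecay here is illustrative, not taken from the dataset):

```Python
# Round-trip an optimizer whose learning rate is a schedule: get_config()
# serializes the schedule to a nested dict, and from_config() rebuilds it.
import tensorflow as tf

schedule = tf.keras.optimizers.schedules.ExponentialDecay(
    initial_learning_rate=1e-3, decay_steps=1000, decay_rate=0.9
)
opt = tf.keras.optimizers.Adam(learning_rate=schedule)

config = opt.get_config()  # "learning_rate" becomes a nested dict here
restored = tf.keras.optimizers.Adam.from_config(config)
# restored now carries an equivalent ExponentialDecay schedule.
```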
1,209
def close(self): # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions log = pathlib.Path(local_loc).read_text() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True
Close and upload local log file to remote storage S3.
10
92
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def close(self): # When application exit, system shuts down all handlers by # calling close method. Here we check if logger is already # closed to prevent uploading the log to remote storage multiple # times when `logging.shutdown` is called. if self.closed: return super().close() if not self.upload_on_close: return local_loc = os.path.join(self.local_base, self.log_relative_path) remote_loc = os.path.join(self.remote_base, self.log_relative_path) if os.path.exists(local_loc): # read log and remove old logs to get just the latest additions log = pathlib.Path(local_loc).read_text() self.s3_write(log, remote_loc) # Mark closed so we don't double write if close is called twice self.closed = True ``` ###Assistant : Close and upload local log file to remote storage S3.
1,210
def safestring_in_template_exception(request): template = Template('{% extends "<script>alert(1);</script>" %}') try: template.render(Context()) except Exception: return technical_500_response(request, *sys.exc_info())
Trigger an exception in the template machinery which causes a SafeString to be inserted as args[0] of the Exception.
19
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def safestring_in_template_exception(request): template = Template('{% extends "<script>alert(1);</script>" %}') try: template.render(Context()) except Exception: return technical_500_response(request, *sys.exc_info()) ``` ###Assistant : Trigger an exception in the template machinery which causes a SafeString to be inserted as args[0] of the Exception.
1,211
async def test_unique_id_in_progress(hass, manager): mock_integration(hass, MockModule("comp")) mock_entity_platform(hass, "config_flow.comp", None)
Test that we abort if there is already a flow in progress with same unique id.
16
9
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_unique_id_in_progress(hass, manager): mock_integration(hass, MockModule("comp")) mock_entity_platform(hass, "config_flow.comp", None) ``` ###Assistant : Test that we abort if there is already a flow in progress with same unique id.
1,212
def test_simple(self) -> None: event_factory = self.hs.get_event_builder_factory() bob = "@creator:test" alice = "@alice:test" room_id = "!room:test" # Ensure that we have a rooms entry so that we generate the chain index. self.get_success( self.store.store_room( room_id=room_id, room_creator_user_id="", is_public=True, room_version=RoomVersions.V6, ) ) create = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Create, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "create"}, }, ).build(prev_event_ids=[], auth_event_ids=[]) ) bob_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": bob, "sender": bob, "room_id": room_id, "content": {"tag": "bob_join"}, }, ).build(prev_event_ids=[], auth_event_ids=[create.event_id]) ) power = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.PowerLevels, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "power"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id], ) ) alice_invite = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": bob, "room_id": room_id, "content": {"tag": "alice_invite"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": alice, "room_id": room_id, "content": {"tag": "alice_join"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id], ) ) power_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.PowerLevels, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "power_2"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) bob_join_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": bob, "sender": bob, "room_id": room_id, "content": {"tag": "bob_join_2"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": alice, "room_id": room_id, "content": {"tag": "alice_join2"}, }, ).build( prev_event_ids=[], auth_event_ids=[ create.event_id, alice_join.event_id, power_2.event_id, ], ) ) events = [ create, bob_join, power, alice_invite, alice_join, bob_join_2, power_2, alice_join2, ] expected_links = [ (bob_join, create), (power, create), (power, bob_join), (alice_invite, create), (alice_invite, power), (alice_invite, bob_join), (bob_join_2, power), (alice_join2, power_2), ] self.persist(events) chain_map, link_map = self.fetch_chains(events) # Check that the expected links and only the expected links have been # added. self.assertEqual(len(expected_links), len(list(link_map.get_additions()))) for start, end in expected_links: start_id, start_seq = chain_map[start.event_id] end_id, end_seq = chain_map[end.event_id] self.assertIn( (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id)) ) # Test that everything can reach the create event, but the create event # can't reach anything. 
for event in events[1:]: self.assertTrue( link_map.exists_path_from( chain_map[event.event_id], chain_map[create.event_id] ), ) self.assertFalse( link_map.exists_path_from( chain_map[create.event_id], chain_map[event.event_id], ), )
Test that the example in `docs/auth_chain_difference_algorithm.md` works.
7
338
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_simple(self) -> None: event_factory = self.hs.get_event_builder_factory() bob = "@creator:test" alice = "@alice:test" room_id = "!room:test" # Ensure that we have a rooms entry so that we generate the chain index. self.get_success( self.store.store_room( room_id=room_id, room_creator_user_id="", is_public=True, room_version=RoomVersions.V6, ) ) create = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Create, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "create"}, }, ).build(prev_event_ids=[], auth_event_ids=[]) ) bob_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": bob, "sender": bob, "room_id": room_id, "content": {"tag": "bob_join"}, }, ).build(prev_event_ids=[], auth_event_ids=[create.event_id]) ) power = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.PowerLevels, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "power"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id], ) ) alice_invite = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": bob, "room_id": room_id, "content": {"tag": "alice_invite"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": alice, "room_id": room_id, "content": {"tag": "alice_join"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, alice_invite.event_id, power.event_id], ) ) power_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.PowerLevels, "state_key": "", "sender": bob, "room_id": room_id, "content": {"tag": "power_2"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) bob_join_2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": bob, "sender": bob, "room_id": room_id, "content": {"tag": "bob_join_2"}, }, ).build( prev_event_ids=[], auth_event_ids=[create.event_id, bob_join.event_id, power.event_id], ) ) alice_join2 = self.get_success( event_factory.for_room_version( RoomVersions.V6, { "type": EventTypes.Member, "state_key": alice, "sender": alice, "room_id": room_id, "content": {"tag": "alice_join2"}, }, ).build( prev_event_ids=[], auth_event_ids=[ create.event_id, alice_join.event_id, power_2.event_id, ], ) ) events = [ create, bob_join, power, alice_invite, alice_join, bob_join_2, power_2, alice_join2, ] expected_links = [ (bob_join, create), (power, create), (power, bob_join), (alice_invite, create), (alice_invite, power), (alice_invite, bob_join), (bob_join_2, power), (alice_join2, power_2), ] self.persist(events) chain_map, link_map = self.fetch_chains(events) # Check that the expected links and only the expected links have been # added. 
self.assertEqual(len(expected_links), len(list(link_map.get_additions()))) for start, end in expected_links: start_id, start_seq = chain_map[start.event_id] end_id, end_seq = chain_map[end.event_id] self.assertIn( (start_seq, end_seq), list(link_map.get_links_between(start_id, end_id)) ) # Test that everything can reach the create event, but the create event # can't reach anything. for event in events[1:]: self.assertTrue( link_map.exists_path_from( chain_map[event.event_id], chain_map[create.event_id] ), ) self.assertFalse( link_map.exists_path_from( chain_map[create.event_id], chain_map[event.event_id], ), ) ``` ###Assistant : Test that the example in `docs/auth_chain_difference_algorithm.md` works.
1,213
async def _do_retry(self, func, attempts=3) -> Any: # pylint: disable=no-self-use exception = None for attempt in range(1, attempts + 1): _LOGGER.debug("Attempt %s of %s", attempt, attempts) try: return await func() except Exception as exception1: # pylint: disable=broad-except _LOGGER.debug( "Failed attempt %s of %s (%s)", attempt, attempts, exception1 ) # Make each backoff pause a little bit longer await asyncio.sleep(0.5 * attempt) exception = exception1 continue if exception: raise exception
Retry a function call. Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.
17
69
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def _do_retry(self, func, attempts=3) -> Any: # pylint: disable=no-self-use exception = None for attempt in range(1, attempts + 1): _LOGGER.debug("Attempt %s of %s", attempt, attempts) try: return await func() except Exception as exception1: # pylint: disable=broad-except _LOGGER.debug( "Failed attempt %s of %s (%s)", attempt, attempts, exception1 ) # Make each backoff pause a little bit longer await asyncio.sleep(0.5 * attempt) exception = exception1 continue if exception: raise exception ``` ###Assistant : Retry a function call. Withings' API occasionally and incorrectly throws errors. Retrying the call tends to work.
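The retry helper above is written as an instance method; a self-contained sketch of the same pattern as a module-level coroutine, with a deliberately failing stub to show the backoff behaviour (the stub and names are assumptions for illustration):

```Python
# Minimal standalone version of the retry-with-growing-backoff pattern above.
import asyncio
import logging

_LOGGER = logging.getLogger(__name__)


async def do_retry(func, attempts=3):
    """Await func() up to `attempts` times, sleeping a bit longer after each failure."""
    exception = None
    for attempt in range(1, attempts + 1):
        _LOGGER.debug("Attempt %s of %s", attempt, attempts)
        try:
            return await func()
        except Exception as err:  # broad on purpose: any failure triggers a retry
            _LOGGER.debug("Failed attempt %s of %s (%s)", attempt, attempts, err)
            await asyncio.sleep(0.5 * attempt)  # back off a little more each time
            exception = err
    if exception:
        raise exception


async def _always_fails():
    raise RuntimeError("transient API error")


# asyncio.run(do_retry(_always_fails))  # re-raises RuntimeError after 3 attempts
```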
1,214
def refactor_doctest(self, block, lineno, indent, filename): try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug("Source: %s", line.rstrip("\n")) self.log_error("Can't parse docstring in %s line %s: %s: %s", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = str(tree).splitlines(keepends=True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == ["\n"] * (lineno-1), clipped if not new[-1].endswith("\n"): new[-1] += "\n" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block
Refactors one doctest. A doctest is given as a block of lines, the first of which starts with ">>>" (possibly indented), while the remaining lines start with "..." (identically indented).
30
97
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def refactor_doctest(self, block, lineno, indent, filename): try: tree = self.parse_block(block, lineno, indent) except Exception as err: if self.logger.isEnabledFor(logging.DEBUG): for line in block: self.log_debug("Source: %s", line.rstrip("\n")) self.log_error("Can't parse docstring in %s line %s: %s: %s", filename, lineno, err.__class__.__name__, err) return block if self.refactor_tree(tree, filename): new = str(tree).splitlines(keepends=True) # Undo the adjustment of the line numbers in wrap_toks() below. clipped, new = new[:lineno-1], new[lineno-1:] assert clipped == ["\n"] * (lineno-1), clipped if not new[-1].endswith("\n"): new[-1] += "\n" block = [indent + self.PS1 + new.pop(0)] if new: block += [indent + self.PS2 + line for line in new] return block ``` ###Assistant : Refactors one doctest. A doctest is given as a block of lines, the first of which starts with ">>>" (possibly indented), while the remaining lines start with "..." (identically indented).
1,215
def get_tables(self, dataset_id) -> Response: client = self.connect() result = client.list_tables(dataset_id) return result
Get a list with all of the tables in BigQuery
10
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_tables(self, dataset_id) -> Response: client = self.connect() result = client.list_tables(dataset_id) return result ``` ###Assistant : Get a list with all of the tables in BigQuery
1,216
def mac_set_relative_dylib_deps(libname, distname): from macholib import util from macholib.MachO import MachO # Ignore bootloader; otherwise PyInstaller fails with exception like # 'ValueError: total_size > low_offset (288 > 0)' if os.path.basename(libname) in _BOOTLOADER_FNAMES: return # Determine how many directories up ('../') is the directory with shared dynamic libraries. # E.g., ./qt4_plugins/images/ -> ./../../ parent_dir = '' # Check if distname is not only base filename. if os.path.dirname(distname): parent_level = len(os.path.dirname(distname).split(os.sep)) parent_dir = parent_level * (os.pardir + os.sep)
On Mac OS set relative paths to dynamic library dependencies of `libname`. Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism. Current location of dependent libraries is derived from the location of the library path (paths start with '@loader_path'). 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level of directory level for @loader_path of binaries not found in dist directory. For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using '@loader_path/../..' for Qt plugins, Mac OS would not be able to resolve shared library dependencies, and Qt plugins will not be loaded.
120
76
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def mac_set_relative_dylib_deps(libname, distname): from macholib import util from macholib.MachO import MachO # Ignore bootloader; otherwise PyInstaller fails with exception like # 'ValueError: total_size > low_offset (288 > 0)' if os.path.basename(libname) in _BOOTLOADER_FNAMES: return # Determine how many directories up ('../') is the directory with shared dynamic libraries. # E.g., ./qt4_plugins/images/ -> ./../../ parent_dir = '' # Check if distname is not only base filename. if os.path.dirname(distname): parent_level = len(os.path.dirname(distname).split(os.sep)) parent_dir = parent_level * (os.pardir + os.sep) ``` ###Assistant : On Mac OS set relative paths to dynamic library dependencies of `libname`. Relative paths allow to avoid using environment variable DYLD_LIBRARY_PATH. There are known some issues with DYLD_LIBRARY_PATH. Relative paths is more flexible mechanism. Current location of dependent libraries is derived from the location of the library path (paths start with '@loader_path'). 'distname' path of the library relative to dist directory of frozen executable. We need this to determine the level of directory level for @loader_path of binaries not found in dist directory. For example, Qt5 plugins are not in the same directory as Qt*.dylib files. Without using '@loader_path/../..' for Qt plugins, Mac OS would not be able to resolve shared library dependencies, and Qt plugins will not be loaded.
1,217
def get_v2_optimizer(name, **kwargs): try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( "Could not find requested v2 optimizer: {}\nValid choices: {}".format( name, list(_V2_OPTIMIZER_MAP.keys()) ) )
Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed.
75
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_v2_optimizer(name, **kwargs): try: return _V2_OPTIMIZER_MAP[name](**kwargs) except KeyError: raise ValueError( "Could not find requested v2 optimizer: {}\nValid choices: {}".format( name, list(_V2_OPTIMIZER_MAP.keys()) ) ) ``` ###Assistant : Get the v2 optimizer requested. This is only necessary until v2 are the default, as we are testing in Eager, and Eager + v1 optimizers fail tests. When we are in v2, the strings alone should be sufficient, and this mapping can theoretically be removed. Args: name: string name of Keras v2 optimizer. **kwargs: any kwargs to pass to the optimizer constructor. Returns: Initialized Keras v2 optimizer. Raises: ValueError: if an unknown name was passed.
1,218
def _prefix_from_ip_int(cls, ip_int): trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = ip_int.to_bytes(byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen
Return prefix length from the bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones
32
52
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _prefix_from_ip_int(cls, ip_int): trailing_zeroes = _count_righthand_zero_bits(ip_int, cls._max_prefixlen) prefixlen = cls._max_prefixlen - trailing_zeroes leading_ones = ip_int >> trailing_zeroes all_ones = (1 << prefixlen) - 1 if leading_ones != all_ones: byteslen = cls._max_prefixlen // 8 details = ip_int.to_bytes(byteslen, 'big') msg = 'Netmask pattern %r mixes zeroes & ones' raise ValueError(msg % details) return prefixlen ``` ###Assistant : Return prefix length from the bitwise netmask. Args: ip_int: An integer, the netmask in expanded bitwise format Returns: An integer, the prefix length. Raises: ValueError: If the input intermingles zeroes & ones
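The helper above resembles the internal netmask-to-prefix conversion in the standard-library ipaddress module; assuming that context, the same computation can be seen through the public API (addresses chosen arbitrarily):

```Python
# 255.255.255.0 has 8 trailing zero bits out of 32, so the prefix length is 24.
import ipaddress

net = ipaddress.ip_network("192.0.2.0/255.255.255.0")
print(net.prefixlen)  # 24

# A mask that intermingles zeroes and ones is rejected with ValueError,
# mirroring the "mixes zeroes & ones" check above.
try:
    ipaddress.ip_network("192.0.2.0/255.0.255.0")
except ValueError as err:
    print(err)
```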
1,219
def batch_has_learnable_example(self, examples): for eg in examples: for ent in eg.predicted.ents: candidates = list(self.get_candidates(self.kb, ent)) if candidates: return True return False
Check if a batch contains a learnable example. If one isn't present, then the update step needs to be skipped.
20
21
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def batch_has_learnable_example(self, examples): for eg in examples: for ent in eg.predicted.ents: candidates = list(self.get_candidates(self.kb, ent)) if candidates: return True return False ``` ###Assistant : Check if a batch contains a learnable example. If one isn't present, then the update step needs to be skipped.
1,220
def get_delayed_update_fields(self): self.extra_update_fields['emitted_events'] = self.event_ct if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''): self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE) return self.extra_update_fields
Return finalized dict of all fields that should be saved along with the job status change
16
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_delayed_update_fields(self): self.extra_update_fields['emitted_events'] = self.event_ct if 'got an unexpected keyword argument' in self.extra_update_fields.get('result_traceback', ''): self.delay_update(result_traceback=ANSIBLE_RUNNER_NEEDS_UPDATE_MESSAGE) return self.extra_update_fields ``` ###Assistant : Return finalized dict of all fields that should be saved along with the job status change
1,221
def get_checks_result(warning_id=None): checks_result = checks.run_checks() if warning_id: return [ warning for warning in checks_result if warning.id == warning_id] return checks_result
Run Django checks on any with the 'search' tag used when registering the check
14
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_checks_result(warning_id=None): checks_result = checks.run_checks() if warning_id: return [ warning for warning in checks_result if warning.id == warning_id] return checks_result ``` ###Assistant : Run Django checks on any with the 'search' tag used when registering the check
1,222
def use_bottleneck_cb(key) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key)) use_numexpr_doc =
: bool Use the numexpr library to accelerate computation if it is installed, the default is True Valid values: False,True
20
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def use_bottleneck_cb(key) -> None: from pandas.core import nanops nanops.set_use_bottleneck(cf.get_option(key)) use_numexpr_doc = ``` ###Assistant : : bool Use the numexpr library to accelerate computation if it is installed, the default is True Valid values: False,True
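The callback above is registered against a pandas config option; from user code the same switches are flipped through the public options API (option names below are the ones pandas exposes under the `compute` namespace):

```Python
# Toggle the acceleration libraries pandas may use for reductions/expressions.
import pandas as pd

pd.set_option("compute.use_bottleneck", False)  # disable bottleneck acceleration
pd.set_option("compute.use_numexpr", True)      # numexpr stays enabled (the default)
print(pd.get_option("compute.use_bottleneck"))  # False
```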
1,223
def test_repeated_column_labels(self, datapath): # GH 13923, 25772 msg = with pytest.raises(ValueError, match=msg): read_stata( datapath("io", "data", "stata", "stata15.dta"), convert_categoricals=True, )
Value labels for column ethnicsn are not unique. These cannot be converted to pandas categoricals. Either read the file with `convert_categoricals` set to False or use the low level interface in `StataReader` to separately read the values and the value_labels. The repeated labels are:\n-+\nwolof
44
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_repeated_column_labels(self, datapath): # GH 13923, 25772 msg = with pytest.raises(ValueError, match=msg): read_stata( datapath("io", "data", "stata", "stata15.dta"), convert_categoricals=True, ) ``` ###Assistant : Value labels for column ethnicsn are not unique. These cannot be converted to pandas categoricals. Either read the file with `convert_categoricals` set to False or use the low level interface in `StataReader` to separately read the values and the value_labels. The repeated labels are:\n-+\nwolof
1,224
def string_width_in_pixels(cls, font, string): # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen if Window.NumOpenWindows == 0: root = tk.Tk() else: root = None size = 0 try: size = tkinter.font.Font(font=font).measure(string) # string's width except Exception as e: _error_popup_with_traceback('Exception retrieving string width in pixels', e) if root is not None: root.destroy() return size
Get the width of the supplied string in pixels for the font being passed in. If an error occurs, 0 will be returned :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike, to be measured :type font: (str or (str, int[, str]) or None) :param string: the string to measure :type string: str :return: Width in pixels of string :rtype: (int)
76
70
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def string_width_in_pixels(cls, font, string): # if no windows have been created (there is no hidden master root to rely on) then temporarily make a window so the measurement can happen if Window.NumOpenWindows == 0: root = tk.Tk() else: root = None size = 0 try: size = tkinter.font.Font(font=font).measure(string) # string's width except Exception as e: _error_popup_with_traceback('Exception retrieving string width in pixels', e) if root is not None: root.destroy() return size ``` ###Assistant : Get the with of the supplied string in pixels for the font being passed in. If an error occurs, 0 will be returned :param font: specifies the font family, size, etc. Tuple or Single string format 'name size styles'. Styles: italic * roman bold normal underline overstrike, to be measured :type font: (str or (str, int[, str]) or None) :param string: the string to measure :type string: str :return: Width in pixels of string :rtype: (int)
1,225
def model_call_inputs(model, keep_original_batch_size=False): input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size) if input_specs is None: return None, None input_specs = _enforce_names_consistency(input_specs) return input_specs
Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>} will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is `False`, which means that the batch dim of the returned input signature will always be set to `None`. Returns: A tuple containing `(args, kwargs)` TensorSpecs of the model call function inputs. `kwargs` does not contain the `training` argument.
119
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def model_call_inputs(model, keep_original_batch_size=False): input_specs = model.save_spec(dynamic_batch=not keep_original_batch_size) if input_specs is None: return None, None input_specs = _enforce_names_consistency(input_specs) return input_specs ``` ###Assistant : Inspect model to get its input signature. The model's input signature is a list with a single (possibly-nested) object. This is due to the Keras-enforced restriction that tensor inputs must be passed in as the first argument. For example, a model with input {'feature1': <Tensor>, 'feature2': <Tensor>} will have input signature: [{'feature1': TensorSpec, 'feature2': TensorSpec}] Args: model: Keras Model object. keep_original_batch_size: A boolean indicating whether we want to keep using the original batch size or set it to None. Default is `False`, which means that the batch dim of the returned input signature will always be set to `None`. Returns: A tuple containing `(args, kwargs)` TensorSpecs of the model call function inputs. `kwargs` does not contain the `training` argument.
1,226
def _getSubDirectoryFolders(self, module, sub_dirs): module_dir = module.getCompileTimeDirectory() file_list = [] data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs] # Gather the full file list, probably makes no sense to include bytecode files file_list = sum( ( getFileList( data_dir, ignore_dirs=("__pycache__",), ignore_suffixes=(".pyc",) ) for data_dir in data_dirs ), [], ) if not file_list: msg = "No files or folders found for '%s' in subfolder(s) %r (%r)." % ( module.getFullName(), sub_dirs, data_dirs, ) self.warning(msg) is_package = ( module.isCompiledPythonPackage() or module.isUncompiledPythonPackage() ) # We need to preserve the package target path in the dist folder. if is_package: package_part = module.getFullName().asPath() else: package = module.getFullName().getPackageName() if package is None: package_part = "" else: package_part = package.asPath() item_set = OrderedSet() for f in file_list: target = os.path.join(package_part, os.path.relpath(f, module_dir)) dir_name = os.path.dirname(target) item_set.add(dir_name) return self.makeIncludedEmptyDirectories( source_path=module_dir, dest_paths=item_set, reason="Subdirectories of module %s" % module.getFullName(), tags="config", )
Get dirnames in given subdirs of the module. Notes: All dirnames in folders below one of the sub_dirs are recursively retrieved and returned shortened to begin with the string of subdir. Args: module: module object sub_dirs: sub folder name(s) - tuple Returns: makeIncludedEmptyDirectories of found dirnames.
46
139
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _getSubDirectoryFolders(self, module, sub_dirs): module_dir = module.getCompileTimeDirectory() file_list = [] data_dirs = [os.path.join(module_dir, subdir) for subdir in sub_dirs] # Gather the full file list, probably makes no sense to include bytecode files file_list = sum( ( getFileList( data_dir, ignore_dirs=("__pycache__",), ignore_suffixes=(".pyc",) ) for data_dir in data_dirs ), [], ) if not file_list: msg = "No files or folders found for '%s' in subfolder(s) %r (%r)." % ( module.getFullName(), sub_dirs, data_dirs, ) self.warning(msg) is_package = ( module.isCompiledPythonPackage() or module.isUncompiledPythonPackage() ) # We need to preserve the package target path in the dist folder. if is_package: package_part = module.getFullName().asPath() else: package = module.getFullName().getPackageName() if package is None: package_part = "" else: package_part = package.asPath() item_set = OrderedSet() for f in file_list: target = os.path.join(package_part, os.path.relpath(f, module_dir)) dir_name = os.path.dirname(target) item_set.add(dir_name) return self.makeIncludedEmptyDirectories( source_path=module_dir, dest_paths=item_set, reason="Subdirectories of module %s" % module.getFullName(), tags="config", ) ``` ###Assistant : Get dirnames in given subdirs of the module. Notes: All dirnames in folders below one of the sub_dirs are recursively retrieved and returned shortened to begin with the string of subdir. Args: module: module object sub_dirs: sub folder name(s) - tuple Returns: makeIncludedEmptyDirectories of found dirnames.
1,227
def test_orderby_percentile_with_many_fields_one_entity_no_data(self): for metric in [ TransactionMRI.MEASUREMENTS_FCP.value, "transaction", ]: perf_indexer_record(self.organization.id, metric) response = self.get_success_response( self.organization.slug, field=[ f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", f"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})", ], statsPeriod="1h", interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", useCase="performance", ) groups = response.data["groups"] assert len(groups) == 0
Test that ensures that when metrics data is available then an empty response is returned gracefully
16
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_orderby_percentile_with_many_fields_one_entity_no_data(self): for metric in [ TransactionMRI.MEASUREMENTS_FCP.value, "transaction", ]: perf_indexer_record(self.organization.id, metric) response = self.get_success_response( self.organization.slug, field=[ f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", f"p50({TransactionMetricKey.MEASUREMENTS_FCP.value})", ], statsPeriod="1h", interval="1h", groupBy=["project_id", "transaction"], orderBy=f"p50({TransactionMetricKey.MEASUREMENTS_LCP.value})", useCase="performance", ) groups = response.data["groups"] assert len(groups) == 0 ``` ###Assistant : Test that ensures that when metrics data is available then an empty response is returned gracefully
1,228
def _add_save_button(self) -> None: logger.debug("Adding save button") button = tk.Button(self, text="Save", cursor="hand2", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug("Added save burron: '%s'", button)
Add a save button for saving out original preview
9
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _add_save_button(self) -> None: logger.debug("Adding save button") button = tk.Button(self, text="Save", cursor="hand2", command=lambda: self.save_var.set(True)) button.pack(side=tk.LEFT) logger.debug("Added save button: '%s'", button) ``` ###Assistant : Add a save button for saving out original preview
1,229
def temporary_environ_defaults(**kwargs): old_env = os.environ.copy() try: for var in kwargs: # TODO: Consider warning on conflicts os.environ.setdefault(var, str(kwargs[var])) yield {var: os.environ[var] for var in kwargs} finally: for var in kwargs: if old_env.get(var): os.environ[var] = old_env[var] else: os.environ.pop(var, None)
Temporarily override default values in os.environ. Yields a dictionary of the key/value pairs matching the provided keys.
17
38
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def temporary_environ_defaults(**kwargs): old_env = os.environ.copy() try: for var in kwargs: # TODO: Consider warning on conflicts os.environ.setdefault(var, str(kwargs[var])) yield {var: os.environ[var] for var in kwargs} finally: for var in kwargs: if old_env.get(var): os.environ[var] = old_env[var] else: os.environ.pop(var, None) ``` ###Assistant : Temporarily override default values in os.environ. Yields a dictionary of the key/value pairs matching the provided keys.
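A short usage sketch for temporary_environ_defaults as documented above. It assumes the generator from this record is in scope and is exposed as a context manager (its yield/finally structure suggests a contextlib.contextmanager wrapper); the variable name MY_SETTING is made up for the example.

```Python
import os
from contextlib import contextmanager

# Assumption: wrap the generator from the record above so it can be used with `with`.
temporary_defaults = contextmanager(temporary_environ_defaults)

os.environ.pop("MY_SETTING", None)               # ensure the variable starts unset
with temporary_defaults(MY_SETTING="fallback") as env:
    print(env["MY_SETTING"])                    # "fallback" (default applied)
    print(os.environ["MY_SETTING"])             # "fallback"
print("MY_SETTING" in os.environ)               # False (original state restored)
```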
1,230
def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics): mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2 _indexer_record(self.organization.id, "metric_foo_doe") self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status="ok", release="[email protected]", errors=2, ) ) response = self.get_response( self.organization.slug, "derived_metric.multiple_metrics", ) assert response.status_code == 404 assert response.json()["detail"] == ( "Not all the requested metrics or the constituent metrics in " "['derived_metric.multiple_metrics'] have data in the dataset" )
Test when not all the requested metrics have data in the dataset
10
53
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_same_entity_multiple_metric_ids_missing_data(self, mocked_derived_metrics): mocked_derived_metrics.return_value = MOCKED_DERIVED_METRICS_2 _indexer_record(self.organization.id, "metric_foo_doe") self.store_session( self.build_session( project_id=self.project.id, started=(time.time() // 60) * 60, status="ok", release="[email protected]", errors=2, ) ) response = self.get_response( self.organization.slug, "derived_metric.multiple_metrics", ) assert response.status_code == 404 assert response.json()["detail"] == ( "Not all the requested metrics or the constituent metrics in " "['derived_metric.multiple_metrics'] have data in the dataset" ) ``` ###Assistant : Test when not all the requested metrics have data in the dataset
1,231
def _align_matrices(x, y): x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( "The outermost dimensions of the input tensors should match. " f"Received y = {y_shape[1]} vs x = {x_shape[1]}." ) x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile
Aligns x and y tensors to allow computations over pairs of their rows.
13
62
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _align_matrices(x, y): x_matrix = _to_matrix(x) y_matrix = _to_matrix(y) x_shape = x_matrix.shape y_shape = y_matrix.shape if y_shape[1] != x_shape[1]: # dimensions do not match. raise ValueError( "The outermost dimensions of the input tensors should match. " f"Received y = {y_shape[1]} vs x = {x_shape[1]}." ) x_tile = tf.tile(tf.expand_dims(x_matrix, 1), [1, y_shape[0], 1]) y_tile = tf.tile(tf.expand_dims(y_matrix, 0), [x_shape[0], 1, 1]) return x_tile, y_tile ``` ###Assistant : Aligns x and y tensors to allow computations over pairs of their rows.
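An illustration of the pairwise row alignment described above, redone with NumPy so it runs standalone; the TensorFlow tiling in the record produces the same shapes. Values are arbitrary example data.

```Python
import numpy as np

x = np.arange(6).reshape(2, 3)     # 2 rows, feature dimension 3
y = np.arange(12).reshape(4, 3)    # 4 rows, feature dimension 3

# Same tiling pattern as _align_matrices: pair every row of x with every row of y.
x_tile = np.tile(x[:, None, :], (1, y.shape[0], 1))    # shape (2, 4, 3)
y_tile = np.tile(y[None, :, :], (x.shape[0], 1, 1))    # shape (2, 4, 3)

# Example pairwise computation: squared distance between all (x row, y row) pairs.
pairwise = ((x_tile - y_tile) ** 2).sum(axis=-1)       # shape (2, 4)
print(x_tile.shape, y_tile.shape, pairwise.shape)
```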
1,232
def test_prune_gap_if_dummy_local(self): body = self.helper.send(self.room_id, body="Test", tok=self.token) body = self.helper.send_event( self.room_id, type=EventTypes.Dummy, content={}, tok=self.token ) local_message_event_id = body["event_id"] self.assert_extremities([local_message_event_id]) # Advance the clock for many days to make the old extremity "old". We # also set the depth to "lots". self.reactor.advance(7 * 24 * 60 * 60) # Fudge a second event which points to an event we don't have. This is a # state event so that the state changes (otherwise we won't prune the # extremity as they'll have the same state group). remote_event_2 = event_from_pdu_json( { "type": EventTypes.Member, "state_key": "@user:other2", "content": {"membership": Membership.JOIN}, "room_id": self.room_id, "sender": "@user:other2", "depth": 10000, "prev_events": ["$some_unknown_message"], "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, RoomVersions.V6, ) state_before_gap = self.get_success( self.state.get_current_state_ids(self.room_id) ) self.persist_event(remote_event_2, state=state_before_gap) # Check the new extremity is just the new remote event. self.assert_extremities([remote_event_2.event_id, local_message_event_id])
Test that we don't drop extremities after a gap when the previous extremity is a local dummy event and points to local events.
23
131
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_prune_gap_if_dummy_local(self): body = self.helper.send(self.room_id, body="Test", tok=self.token) body = self.helper.send_event( self.room_id, type=EventTypes.Dummy, content={}, tok=self.token ) local_message_event_id = body["event_id"] self.assert_extremities([local_message_event_id]) # Advance the clock for many days to make the old extremity "old". We # also set the depth to "lots". self.reactor.advance(7 * 24 * 60 * 60) # Fudge a second event which points to an event we don't have. This is a # state event so that the state changes (otherwise we won't prune the # extremity as they'll have the same state group). remote_event_2 = event_from_pdu_json( { "type": EventTypes.Member, "state_key": "@user:other2", "content": {"membership": Membership.JOIN}, "room_id": self.room_id, "sender": "@user:other2", "depth": 10000, "prev_events": ["$some_unknown_message"], "auth_events": [], "origin_server_ts": self.clock.time_msec(), }, RoomVersions.V6, ) state_before_gap = self.get_success( self.state.get_current_state_ids(self.room_id) ) self.persist_event(remote_event_2, state=state_before_gap) # Check the new extremity is just the new remote event. self.assert_extremities([remote_event_2.event_id, local_message_event_id]) ``` ###Assistant : Test that we don't drop extremities after a gap when the previous extremity is a local dummy event and points to local events.
1,233
def _reset_layer_losses(parent_layer): losses_dict = {} for layer in utils.list_all_layers_and_sublayers(parent_layer): losses_dict[layer] = { 'losses': layer._losses[:], 'eager_losses': layer._eager_losses[:] } with utils.no_automatic_dependency_tracking_scope(layer): layer._losses = [] layer._eager_losses = [] return losses_dict
Resets losses of layer and its sublayers, and returns original losses.
11
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _reset_layer_losses(parent_layer): losses_dict = {} for layer in utils.list_all_layers_and_sublayers(parent_layer): losses_dict[layer] = { 'losses': layer._losses[:], 'eager_losses': layer._eager_losses[:] } with utils.no_automatic_dependency_tracking_scope(layer): layer._losses = [] layer._eager_losses = [] return losses_dict ``` ###Assistant : Resets losses of layer and its sublayers, and returns original losses.
1,234
def test_result_list_editable_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) # Test with list_editable fields m.list_display = ["id", "name", "parent"] m.list_display_links = ["id"] m.list_editable = ["name"] cl = m.get_changelist_instance(request) FormSet = m.get_changelist_formset(request) cl.formset = FormSet(queryset=cl.result_list) template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) # make sure that hidden fields are in the correct place hiddenfields_div = ( '<div class="hiddenfields">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">' "</div>" ) % new_child.id self.assertInHTML( hiddenfields_div, table_output, msg_prefix="Failed to find hidden fields" ) # make sure that list editable fields are rendered in divs correctly editable_name_field = ( '<input name="form-0-name" value="name" class="vTextField" ' 'maxlength="30" type="text" id="id_form-0-name">' ) self.assertInHTML( '<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field', )
Regression tests for #11791: Inclusion tag result_list generates a table and this checks that the items are nested within the table element tags. Also a regression test for #13599, verifies that hidden fields when list_editable is enabled are rendered in a div outside the table.
45
139
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_result_list_editable_html(self): new_parent = Parent.objects.create(name="parent") new_child = Child.objects.create(name="name", parent=new_parent) request = self.factory.get("/child/") request.user = self.superuser m = ChildAdmin(Child, custom_site) # Test with list_editable fields m.list_display = ["id", "name", "parent"] m.list_display_links = ["id"] m.list_editable = ["name"] cl = m.get_changelist_instance(request) FormSet = m.get_changelist_formset(request) cl.formset = FormSet(queryset=cl.result_list) template = Template( "{% load admin_list %}{% spaceless %}{% result_list cl %}{% endspaceless %}" ) context = Context({"cl": cl, "opts": Child._meta}) table_output = template.render(context) # make sure that hidden fields are in the correct place hiddenfields_div = ( '<div class="hiddenfields">' '<input type="hidden" name="form-0-id" value="%d" id="id_form-0-id">' "</div>" ) % new_child.id self.assertInHTML( hiddenfields_div, table_output, msg_prefix="Failed to find hidden fields" ) # make sure that list editable fields are rendered in divs correctly editable_name_field = ( '<input name="form-0-name" value="name" class="vTextField" ' 'maxlength="30" type="text" id="id_form-0-name">' ) self.assertInHTML( '<td class="field-name">%s</td>' % editable_name_field, table_output, msg_prefix='Failed to find "name" list_editable field', ) ``` ###Assistant : Regression tests for #11791: Inclusion tag result_list generates a table and this checks that the items are nested within the table element tags. Also a regression test for #13599, verifies that hidden fields when list_editable is enabled are rendered in a div outside the table.
1,235
def test_title_column(self): root_page = Page.objects.filter(depth=2).first() blog = Site.objects.create( hostname="blog.example.com", site_name="My blog", root_page=root_page ) gallery = Site.objects.create( hostname="gallery.example.com", site_name="My gallery", root_page=root_page ) data = [blog, gallery] table = Table( [ TitleColumn( "hostname", url_name="wagtailsites:edit", link_classname="choose-site", link_attrs={"data-chooser": "yes"}, ), Column("site_name", label="Site name"), ], data, ) html = self.render_component(table) self.assertHTMLEqual( html, % (blog.pk, gallery.pk), )
<table class="listing"> <thead> <tr><th>Hostname</th><th>Site name</th></tr> </thead> <tbody> <tr> <td class="title"> <div class="title-wrapper"> <a href="/admin/sites/%d/" class="choose-site" data-chooser="yes">blog.example.com</a> </div> </td> <td>My blog</td> </tr> <tr> <td class="title"> <div class="title-wrapper"> <a href="/admin/sites/%d/" class="choose-site" data-chooser="yes">gallery.example.com</a> </div> </td> <td>My gallery</td> </tr> </tbody> </table>
37
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_title_column(self): root_page = Page.objects.filter(depth=2).first() blog = Site.objects.create( hostname="blog.example.com", site_name="My blog", root_page=root_page ) gallery = Site.objects.create( hostname="gallery.example.com", site_name="My gallery", root_page=root_page ) data = [blog, gallery] table = Table( [ TitleColumn( "hostname", url_name="wagtailsites:edit", link_classname="choose-site", link_attrs={"data-chooser": "yes"}, ), Column("site_name", label="Site name"), ], data, ) html = self.render_component(table) self.assertHTMLEqual( html, % (blog.pk, gallery.pk), ) ``` ###Assistant : <table class="listing"> <thead> <tr><th>Hostname</th><th>Site name</th></tr> </thead> <tbody> <tr> <td class="title"> <div class="title-wrapper"> <a href="/admin/sites/%d/" class="choose-site" data-chooser="yes">blog.example.com</a> </div> </td> <td>My blog</td> </tr> <tr> <td class="title"> <div class="title-wrapper"> <a href="/admin/sites/%d/" class="choose-site" data-chooser="yes">gallery.example.com</a> </div> </td> <td>My gallery</td> </tr> </tbody> </table>
1,236
def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output)
Performs invalid character removal and whitespace cleanup on text.
9
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _clean_text(self, text): output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xFFFD or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) ``` ###Assistant : Performs invalid character removal and whitespace cleanup on text.
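A standalone sketch of the same cleanup, with simplified stand-ins for the private _is_control/_is_whitespace helpers the method relies on (the real tokenizer helpers behave similarly but are not shown in this record).

```Python
import unicodedata

def _is_control(ch):
    # Tabs and newlines are treated as whitespace, not as control characters.
    if ch in ("\t", "\n", "\r"):
        return False
    return unicodedata.category(ch).startswith("C")

def _is_whitespace(ch):
    return ch in (" ", "\t", "\n", "\r") or unicodedata.category(ch) == "Zs"

def clean_text(text):
    output = []
    for ch in text:
        cp = ord(ch)
        if cp == 0 or cp == 0xFFFD or _is_control(ch):
            continue
        output.append(" " if _is_whitespace(ch) else ch)
    return "".join(output)

print(repr(clean_text("hi\tthere\x00!")))   # 'hi there!'
```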
1,237
def call_candle(self, other_args): if self.symbol: parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="candle", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: plot_chart( symbol=self.symbol, currency=self.current_currency, prices_df=self.current_df, )
Process candle command. Display chart for loaded coin. You can specify currency vs which you want to show chart and also number of days to get data for.
27
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_candle(self, other_args): if self.symbol: parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="candle", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_BOTH_RAW_DATA_AND_FIGURES ) if ns_parser: plot_chart( symbol=self.symbol, currency=self.current_currency, prices_df=self.current_df, ) ``` ###Assistant : Process candle command. Display chart for loaded coin. You can specify currency vs which you want to show chart and also number of days to get data for.
1,238
async def test_load_values_when_added_to_hass(hass): config = { "binary_sensor": { "name": "Test_Binary", "platform": "bayesian", "unique_id": "3b4c9563-5e84-4167-8fe7-8f507e796d72", "device_class": "connectivity", "observations": [ { "platform": "state", "entity_id": "sensor.test_monitored", "to_state": "off", "prob_given_true": 0.8, "prob_given_false": 0.4, } ], "prior": 0.2, "probability_threshold": 0.32, } } hass.states.async_set("sensor.test_monitored", "off") await hass.async_block_till_done() assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() entity_registry = async_get_entities(hass) assert ( entity_registry.entities["binary_sensor.test_binary"].unique_id == "bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72" ) state = hass.states.get("binary_sensor.test_binary") assert state.attributes.get("device_class") == "connectivity" assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8 assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4
Test that sensor initializes with observations of relevant entities.
9
72
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_load_values_when_added_to_hass(hass): config = { "binary_sensor": { "name": "Test_Binary", "platform": "bayesian", "unique_id": "3b4c9563-5e84-4167-8fe7-8f507e796d72", "device_class": "connectivity", "observations": [ { "platform": "state", "entity_id": "sensor.test_monitored", "to_state": "off", "prob_given_true": 0.8, "prob_given_false": 0.4, } ], "prior": 0.2, "probability_threshold": 0.32, } } hass.states.async_set("sensor.test_monitored", "off") await hass.async_block_till_done() assert await async_setup_component(hass, "binary_sensor", config) await hass.async_block_till_done() entity_registry = async_get_entities(hass) assert ( entity_registry.entities["binary_sensor.test_binary"].unique_id == "bayesian-3b4c9563-5e84-4167-8fe7-8f507e796d72" ) state = hass.states.get("binary_sensor.test_binary") assert state.attributes.get("device_class") == "connectivity" assert state.attributes.get("observations")[0]["prob_given_true"] == 0.8 assert state.attributes.get("observations")[0]["prob_given_false"] == 0.4 ``` ###Assistant : Test that sensor initializes with observations of relevant entities.
1,239
def test_copy_with_target_credential(self): expression = "col1, col2" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, storage_credential='abc', credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() )
COPY INTO test WITH (CREDENTIAL abc) FROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') )) FILEFORMAT = CSV
20
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_copy_with_target_credential(self): expression = "col1, col2" op = DatabricksCopyIntoOperator( file_location=COPY_FILE_LOCATION, file_format='CSV', table_name='test', task_id=TASK_ID, expression_list=expression, storage_credential='abc', credential={'AZURE_SAS_TOKEN': 'abc'}, ) assert ( op._create_sql_query() == f.strip() ) ``` ###Assistant : COPY INTO test WITH (CREDENTIAL abc) FROM (SELECT {expression} FROM '{COPY_FILE_LOCATION}' WITH (CREDENTIAL (AZURE_SAS_TOKEN = 'abc') )) FILEFORMAT = CSV
1,240
def show_actual_vendor_versions(vendor_txt_versions): # type: (Dict[str, str]) -> None for module_name, expected_version in vendor_txt_versions.items(): extra_message = '' actual_version = get_vendor_version_from_module(module_name) if not actual_version: extra_message = ' (Unable to locate actual module version, using'\ ' vendor.txt specified version)' actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): extra_message = ' (CONFLICT: vendor.txt suggests version should'\ ' be {})'.format(expected_version) logger.info('%s==%s%s', module_name, actual_version, extra_message)
Log the actual version and print extra info if there is a conflict or if the actual version could not be imported.
22
58
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def show_actual_vendor_versions(vendor_txt_versions): # type: (Dict[str, str]) -> None for module_name, expected_version in vendor_txt_versions.items(): extra_message = '' actual_version = get_vendor_version_from_module(module_name) if not actual_version: extra_message = ' (Unable to locate actual module version, using'\ ' vendor.txt specified version)' actual_version = expected_version elif parse_version(actual_version) != parse_version(expected_version): extra_message = ' (CONFLICT: vendor.txt suggests version should'\ ' be {})'.format(expected_version) logger.info('%s==%s%s', module_name, actual_version, extra_message) ``` ###Assistant : Log the actual version and print extra info if there is a conflict or if the actual version could not be imported.
1,241
def escape_rfc3986(s): if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') # ensure unicode: after quoting, it can always be converted return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]"))
Escape non-ASCII characters as suggested by RFC 3986
8
26
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def escape_rfc3986(s): if sys.version_info < (3, 0) and isinstance(s, compat_str): s = s.encode('utf-8') # ensure unicode: after quoting, it can always be converted return compat_str(compat_urllib_parse.quote(s, b"%/;:@&=+$,!~*'()?#[]")) ``` ###Assistant : Escape non-ASCII characters as suggested by RFC 3986
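On Python 3 the compat_* shims above are not needed; a rough standard-library equivalent with the same safe-character set might look like this (the example URL is made up).

```Python
from urllib.parse import quote

def escape_rfc3986_py3(s):
    # Percent-encode non-ASCII and other unsafe characters while leaving the
    # RFC 3986 reserved characters below untouched.
    return quote(s, safe="%/;:@&=+$,!~*'()?#[]")

print(escape_rfc3986_py3("https://example.com/søk?q=héllo"))
```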
1,242
def call_social(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="social", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_social( self.coin_map_df["CoinGecko"], export=ns_parser.export )
Process social command. Shows social media corresponding to loaded coin. You can find there name of telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.
25
24
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def call_social(self, other_args): parser = argparse.ArgumentParser( add_help=False, formatter_class=argparse.ArgumentDefaultsHelpFormatter, prog="social", description=, ) ns_parser = parse_known_args_and_warn( parser, other_args, EXPORT_ONLY_RAW_DATA_ALLOWED ) if ns_parser: pycoingecko_view.display_social( self.coin_map_df["CoinGecko"], export=ns_parser.export ) ``` ###Assistant : Process social command. Shows social media corresponding to loaded coin. You can find there name of telegram channel, urls to twitter, reddit, bitcointalk, facebook and discord.
1,243
def test_ignores_different_group(self): url = reverse( "sentry-api-0-project-event-details", kwargs={ "event_id": self.next_transaction_event.event_id, "project_slug": self.next_transaction_event.project.slug, "organization_slug": self.next_transaction_event.project.organization.slug, }, ) with self.feature("organizations:performance-issues"): response = self.client.get(url, format="json", data={"group_id": self.group.id}) assert response.status_code == 200, response.content assert response.data["id"] == str(self.next_transaction_event.event_id) assert response.data["nextEventID"] is None
Test that a different group's events aren't attributed to the one that was passed
14
36
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_ignores_different_group(self): url = reverse( "sentry-api-0-project-event-details", kwargs={ "event_id": self.next_transaction_event.event_id, "project_slug": self.next_transaction_event.project.slug, "organization_slug": self.next_transaction_event.project.organization.slug, }, ) with self.feature("organizations:performance-issues"): response = self.client.get(url, format="json", data={"group_id": self.group.id}) assert response.status_code == 200, response.content assert response.data["id"] == str(self.next_transaction_event.event_id) assert response.data["nextEventID"] is None ``` ###Assistant : Test that a different group's events aren't attributed to the one that was passed
1,244
def _dedupe_indices(new, exclude, index_structure): inds_self = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(inds_self) if len(conflicts) == 0: return None inds_self.update(dums_new) self_args_free = [(i, None) for i in inds_self] gen = index_structure._get_generator_for_dummy_indices(self_args_free) repl = {} for d in conflicts: if -d in repl.keys(): continue newname = gen(d.tensor_index_type) new_d = d.func(newname, *d.args[1:]) repl[d] = new_d repl[-d] = -new_d if len(repl) == 0: return None new_renamed = new._replace_indices(repl) return new_renamed
exclude: set new: TensExpr index_structure: _IndexStructure (required to generate new dummy indices) If ``new`` has any dummy indices that are in ``exclude``, return a version of new with those indices replaced. If no replacements are needed, return None ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. Since the latter does not use the index position for anything, we just set it as ``None`` here.
63
67
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _dedupe_indices(new, exclude, index_structure): inds_self = set(exclude) dums_new = set(get_dummy_indices(new)) conflicts = dums_new.intersection(inds_self) if len(conflicts) == 0: return None inds_self.update(dums_new) self_args_free = [(i, None) for i in inds_self] gen = index_structure._get_generator_for_dummy_indices(self_args_free) repl = {} for d in conflicts: if -d in repl.keys(): continue newname = gen(d.tensor_index_type) new_d = d.func(newname, *d.args[1:]) repl[d] = new_d repl[-d] = -new_d if len(repl) == 0: return None new_renamed = new._replace_indices(repl) return new_renamed ``` ###Assistant : exclude: set new: TensExpr index_structure: _IndexStructure (required to generate new dummy indices) If ``new`` has any dummy indices that are in ``exclude``, return a version of new with those indices replaced. If no replacements are needed, return None ``self_args_free`` is to be passed to ``_IndexStructure._get_generator_for_dummy_indices()``. Since the latter does not use the index position for anything, we just set it as ``None`` here.
1,245
def get_window_extent(self, renderer=None): # make sure the location is updated so that transforms etc are correct: self._adjust_location() bb = super().get_window_extent(renderer=renderer) if self.axis is None or not self.axis.get_visible(): return bb bboxes = [bb] drawn_ticks = self.axis._update_ticks() major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None) minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None) for tick in [major_tick, minor_tick]: if tick is None: continue bb0 = bb.frozen() tickl = tick._size tickdir = tick._tickdir if tickdir == 'out': padout = 1 padin = 0 elif tickdir == 'in': padout = 0 padin = 1 else: padout = 0.5 padin = 0.5 padout = padout * tickl / 72 * self.figure.dpi padin = padin * tickl / 72 * self.figure.dpi if tick.tick1line.get_visible(): if self.spine_type == 'left': bb0.x0 = bb0.x0 - padout bb0.x1 = bb0.x1 + padin elif self.spine_type == 'bottom': bb0.y0 = bb0.y0 - padout bb0.y1 = bb0.y1 + padin if tick.tick2line.get_visible(): if self.spine_type == 'right': bb0.x1 = bb0.x1 + padout bb0.x0 = bb0.x0 - padin elif self.spine_type == 'top': bb0.y1 = bb0.y1 + padout bb0.y0 = bb0.y0 - padout bboxes.append(bb0) return mtransforms.Bbox.union(bboxes)
Return the window extent of the spines in display space, including padding for ticks (but not their labels) See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axes.Axes.get_window_extent
23
174
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_window_extent(self, renderer=None): # make sure the location is updated so that transforms etc are correct: self._adjust_location() bb = super().get_window_extent(renderer=renderer) if self.axis is None or not self.axis.get_visible(): return bb bboxes = [bb] drawn_ticks = self.axis._update_ticks() major_tick = next(iter({*drawn_ticks} & {*self.axis.majorTicks}), None) minor_tick = next(iter({*drawn_ticks} & {*self.axis.minorTicks}), None) for tick in [major_tick, minor_tick]: if tick is None: continue bb0 = bb.frozen() tickl = tick._size tickdir = tick._tickdir if tickdir == 'out': padout = 1 padin = 0 elif tickdir == 'in': padout = 0 padin = 1 else: padout = 0.5 padin = 0.5 padout = padout * tickl / 72 * self.figure.dpi padin = padin * tickl / 72 * self.figure.dpi if tick.tick1line.get_visible(): if self.spine_type == 'left': bb0.x0 = bb0.x0 - padout bb0.x1 = bb0.x1 + padin elif self.spine_type == 'bottom': bb0.y0 = bb0.y0 - padout bb0.y1 = bb0.y1 + padin if tick.tick2line.get_visible(): if self.spine_type == 'right': bb0.x1 = bb0.x1 + padout bb0.x0 = bb0.x0 - padin elif self.spine_type == 'top': bb0.y1 = bb0.y1 + padout bb0.y0 = bb0.y0 - padout bboxes.append(bb0) return mtransforms.Bbox.union(bboxes) ``` ###Assistant : Return the window extent of the spines in display space, including padding for ticks (but not their labels) See Also -------- matplotlib.axes.Axes.get_tightbbox matplotlib.axes.Axes.get_window_extent
1,246
def apply_support(self, location, type): if location not in self._node_labels: raise ValueError("Support must be added on a known node") else: self._supports[location] = type if type == "pinned": self._loads['R_'+str(location)+'_x']= [] self._loads['R_'+str(location)+'_y']= [] elif type == "roller": self._loads['R_'+str(location)+'_y']= [] if 'R_'+str(location)+'_x' in list(self._loads): self._loads.pop('R_'+str(location)+'_x')
This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.apply_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'}
66
41
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def apply_support(self, location, type): if location not in self._node_labels: raise ValueError("Support must be added on a known node") else: self._supports[location] = type if type == "pinned": self._loads['R_'+str(location)+'_x']= [] self._loads['R_'+str(location)+'_y']= [] elif type == "roller": self._loads['R_'+str(location)+'_y']= [] if 'R_'+str(location)+'_x' in list(self._loads): self._loads.pop('R_'+str(location)+'_x') ``` ###Assistant : This method adds a pinned or roller support at a particular node Parameters ========== location: String or Symbol Label of the Node at which support is added. type: String Type of the support being provided at the node. Examples ======== >>> from sympy.physics.continuum_mechanics.truss import Truss >>> t = Truss() >>> t.add_node('A', 0, 0) >>> t.add_node('B', 3, 0) >>> t.apply_support('A', 'pinned') >>> t.supports {'A': 'pinned', 'B': 'none'}
1,247
def timers(self) -> list[dict[str, Any]]: return [ { "enabled": timer.enabled, "cron": timer.cron, "next_schedule": as_utc(timer.next_schedule), } for timer in self.coordinator.data.timers ]
Get the list of added timers of the vacuum cleaner.
10
20
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def timers(self) -> list[dict[str, Any]]: return [ { "enabled": timer.enabled, "cron": timer.cron, "next_schedule": as_utc(timer.next_schedule), } for timer in self.coordinator.data.timers ] ``` ###Assistant : Get the list of added timers of the vacuum cleaner.
1,248
def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False): old_fig_size = plt.rcParams["figure.figsize"] if fig_size is not None: plt.rcParams["figure.figsize"] = fig_size fig, ax = plt.subplots() x = np.array(range(len(chars))) my_xticks = [c for c in chars] plt.xticks(x, my_xticks) ax.set_xlabel("characters") ax.set_ylabel("freq") ax2 = ax.twinx() ax2.plot(pitch, linewidth=5.0, color="red") ax2.set_ylabel("F0") plt.rcParams["figure.figsize"] = old_fig_size if not output_fig: plt.close() return fig
Plot pitch curves on top of the input characters. Args: pitch (np.array): Pitch values. chars (str): Characters to place to the x-axis. Shapes: pitch: :math:`(T,)`
25
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def plot_avg_pitch(pitch, chars, fig_size=(30, 10), output_fig=False): old_fig_size = plt.rcParams["figure.figsize"] if fig_size is not None: plt.rcParams["figure.figsize"] = fig_size fig, ax = plt.subplots() x = np.array(range(len(chars))) my_xticks = [c for c in chars] plt.xticks(x, my_xticks) ax.set_xlabel("characters") ax.set_ylabel("freq") ax2 = ax.twinx() ax2.plot(pitch, linewidth=5.0, color="red") ax2.set_ylabel("F0") plt.rcParams["figure.figsize"] = old_fig_size if not output_fig: plt.close() return fig ``` ###Assistant : Plot pitch curves on top of the input characters. Args: pitch (np.array): Pitch values. chars (str): Characters to place to the x-axis. Shapes: pitch: :math:`(T,)`
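A small usage sketch for the plotting helper above, assuming plot_avg_pitch from this record is in scope; the pitch values are random dummy data and the output filename is illustrative.

```Python
import numpy as np

chars = "hello world"
pitch = np.abs(np.random.randn(len(chars))) * 100.0   # one fake F0 value per character

fig = plot_avg_pitch(pitch, chars, fig_size=(12, 4), output_fig=True)
fig.savefig("avg_pitch_example.png")
```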
1,249
def composite(nth): n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; composite(1) == 4") composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18] if n <= 10: return composite_arr[n - 1] a, b = 4, sieve._list[-1] if n <= b - primepi(b) - 1: while a < b - 1: mid = (a + b) >> 1 if mid - primepi(mid) - 1 > n: b = mid else: a = mid if isprime(a): a -= 1 return a from sympy.functions.elementary.exponential import log from sympy.functions.special.error_functions import li a = 4 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if mid - li(mid) - 1 > n: b = mid else: a = mid + 1 n_composites = a - primepi(a) - 1 while n_composites > n: if not isprime(a): n_composites -= 1 a -= 1 if isprime(a): a -= 1 return a
Return the nth composite number, with the composite numbers indexed as composite(1) = 4, composite(2) = 6, etc.... Examples ======== >>> from sympy import composite >>> composite(36) 52 >>> composite(1) 4 >>> composite(17737) 20000 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n prime : Return the nth prime compositepi : Return the number of positive composite numbers less than or equal to n
87
170
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def composite(nth): n = as_int(nth) if n < 1: raise ValueError("nth must be a positive integer; composite(1) == 4") composite_arr = [4, 6, 8, 9, 10, 12, 14, 15, 16, 18] if n <= 10: return composite_arr[n - 1] a, b = 4, sieve._list[-1] if n <= b - primepi(b) - 1: while a < b - 1: mid = (a + b) >> 1 if mid - primepi(mid) - 1 > n: b = mid else: a = mid if isprime(a): a -= 1 return a from sympy.functions.elementary.exponential import log from sympy.functions.special.error_functions import li a = 4 # Lower bound for binary search b = int(n*(log(n) + log(log(n)))) # Upper bound for the search. while a < b: mid = (a + b) >> 1 if mid - li(mid) - 1 > n: b = mid else: a = mid + 1 n_composites = a - primepi(a) - 1 while n_composites > n: if not isprime(a): n_composites -= 1 a -= 1 if isprime(a): a -= 1 return a ``` ###Assistant : Return the nth composite number, with the composite numbers indexed as composite(1) = 4, composite(2) = 6, etc.... Examples ======== >>> from sympy import composite >>> composite(36) 52 >>> composite(1) 4 >>> composite(17737) 20000 See Also ======== sympy.ntheory.primetest.isprime : Test if n is prime primerange : Generate all primes in a given range primepi : Return the number of primes less than or equal to n prime : Return the nth prime compositepi : Return the number of positive composite numbers less than or equal to n
1,250
def histogram2d(x, y, bins=10, range=None, density=None, weights=None): from numpy import histogramdd if len(x) != len(y): raise ValueError('x and y must have the same length.') try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) return hist, edges[0], edges[1] @set_module('numpy')
Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape (N,) An array containing the x coordinates of the points to be histogrammed. y : array_like, shape (N,) An array containing the y coordinates of the points to be histogrammed. bins : int or array_like or [int, int] or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. density : bool, optional If False, the default, returns the number of samples in each bin. If True, returns the probability *density* function at the bin, ``bin_count / sample_count / bin_area``. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `density` is True. If `density` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx+1,) The bin edges along the first dimension. yedges : ndarray, shape(ny+1,) The bin edges along the second dimension. See Also -------- histogram : 1D histogram histogramdd : Multidimensional histogram Notes ----- When `density` is True, then the returned histogram is the sample density, defined such that the sum over bins of the product ``bin_value * bin_area`` is 1. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abscissa and `y` values on the ordinate axis. Rather, `x` is histogrammed along the first dimension of the array (vertical), and `y` along the second dimension of the array (horizontal). This ensures compatibility with `histogramdd`. Examples -------- >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt Construct a 2-D histogram with variable bin width. First define the bin edges: >>> xedges = [0, 1, 3, 5] >>> yedges = [0, 2, 3, 4, 6] Next we create a histogram H with random bin content: >>> x = np.random.normal(2, 1, 100) >>> y = np.random.normal(1, 1, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) >>> # Histogram does not follow Cartesian convention (see Notes), >>> # therefore transpose H for visualization purposes. >>> H = H.T :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins: >>> fig = plt.figure(figsize=(7, 3)) >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='lower', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) <matplotlib.image.AxesImage object at 0x...> :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges: >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', ... 
aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) <matplotlib.collections.QuadMesh object at 0x...> :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to display actual bin edges with interpolation: >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) >>> im = NonUniformImage(ax, interpolation='bilinear') >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 >>> im.set_data(xcenters, ycenters, H) >>> ax.images.append(im) >>> plt.show() It is also possible to construct a 2-D histogram without specifying bin edges: >>> # Generate non-symmetric test data >>> n = 10000 >>> x = np.linspace(1, 100, n) >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) Now we can plot the histogram using :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison. >>> # Plot histogram using pcolormesh >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') >>> ax1.plot(x, 2*np.log(x), 'k-') >>> ax1.set_xlim(x.min(), x.max()) >>> ax1.set_ylim(y.min(), y.max()) >>> ax1.set_xlabel('x') >>> ax1.set_ylabel('y') >>> ax1.set_title('histogram2d') >>> ax1.grid() >>> # Create hexbin plot for comparison >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') >>> ax2.plot(x, 2*np.log(x), 'k-') >>> ax2.set_title('hexbin') >>> ax2.set_xlim(x.min(), x.max()) >>> ax2.set_xlabel('x') >>> ax2.grid() >>> plt.show()
747
64
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def histogram2d(x, y, bins=10, range=None, density=None, weights=None): from numpy import histogramdd if len(x) != len(y): raise ValueError('x and y must have the same length.') try: N = len(bins) except TypeError: N = 1 if N != 1 and N != 2: xedges = yedges = asarray(bins) bins = [xedges, yedges] hist, edges = histogramdd([x, y], bins, range, density, weights) return hist, edges[0], edges[1] @set_module('numpy') ``` ###Assistant : Compute the bi-dimensional histogram of two data samples. Parameters ---------- x : array_like, shape (N,) An array containing the x coordinates of the points to be histogrammed. y : array_like, shape (N,) An array containing the y coordinates of the points to be histogrammed. bins : int or array_like or [int, int] or [array, array], optional The bin specification: * If int, the number of bins for the two dimensions (nx=ny=bins). * If array_like, the bin edges for the two dimensions (x_edges=y_edges=bins). * If [int, int], the number of bins in each dimension (nx, ny = bins). * If [array, array], the bin edges in each dimension (x_edges, y_edges = bins). * A combination [int, array] or [array, int], where int is the number of bins and array is the bin edges. range : array_like, shape(2,2), optional The leftmost and rightmost edges of the bins along each dimension (if not specified explicitly in the `bins` parameters): ``[[xmin, xmax], [ymin, ymax]]``. All values outside of this range will be considered outliers and not tallied in the histogram. density : bool, optional If False, the default, returns the number of samples in each bin. If True, returns the probability *density* function at the bin, ``bin_count / sample_count / bin_area``. weights : array_like, shape(N,), optional An array of values ``w_i`` weighing each sample ``(x_i, y_i)``. Weights are normalized to 1 if `density` is True. If `density` is False, the values of the returned histogram are equal to the sum of the weights belonging to the samples falling into each bin. Returns ------- H : ndarray, shape(nx, ny) The bi-dimensional histogram of samples `x` and `y`. Values in `x` are histogrammed along the first dimension and values in `y` are histogrammed along the second dimension. xedges : ndarray, shape(nx+1,) The bin edges along the first dimension. yedges : ndarray, shape(ny+1,) The bin edges along the second dimension. See Also -------- histogram : 1D histogram histogramdd : Multidimensional histogram Notes ----- When `density` is True, then the returned histogram is the sample density, defined such that the sum over bins of the product ``bin_value * bin_area`` is 1. Please note that the histogram does not follow the Cartesian convention where `x` values are on the abscissa and `y` values on the ordinate axis. Rather, `x` is histogrammed along the first dimension of the array (vertical), and `y` along the second dimension of the array (horizontal). This ensures compatibility with `histogramdd`. Examples -------- >>> from matplotlib.image import NonUniformImage >>> import matplotlib.pyplot as plt Construct a 2-D histogram with variable bin width. 
First define the bin edges: >>> xedges = [0, 1, 3, 5] >>> yedges = [0, 2, 3, 4, 6] Next we create a histogram H with random bin content: >>> x = np.random.normal(2, 1, 100) >>> y = np.random.normal(1, 1, 100) >>> H, xedges, yedges = np.histogram2d(x, y, bins=(xedges, yedges)) >>> # Histogram does not follow Cartesian convention (see Notes), >>> # therefore transpose H for visualization purposes. >>> H = H.T :func:`imshow <matplotlib.pyplot.imshow>` can only display square bins: >>> fig = plt.figure(figsize=(7, 3)) >>> ax = fig.add_subplot(131, title='imshow: square bins') >>> plt.imshow(H, interpolation='nearest', origin='lower', ... extent=[xedges[0], xedges[-1], yedges[0], yedges[-1]]) <matplotlib.image.AxesImage object at 0x...> :func:`pcolormesh <matplotlib.pyplot.pcolormesh>` can display actual edges: >>> ax = fig.add_subplot(132, title='pcolormesh: actual edges', ... aspect='equal') >>> X, Y = np.meshgrid(xedges, yedges) >>> ax.pcolormesh(X, Y, H) <matplotlib.collections.QuadMesh object at 0x...> :class:`NonUniformImage <matplotlib.image.NonUniformImage>` can be used to display actual bin edges with interpolation: >>> ax = fig.add_subplot(133, title='NonUniformImage: interpolated', ... aspect='equal', xlim=xedges[[0, -1]], ylim=yedges[[0, -1]]) >>> im = NonUniformImage(ax, interpolation='bilinear') >>> xcenters = (xedges[:-1] + xedges[1:]) / 2 >>> ycenters = (yedges[:-1] + yedges[1:]) / 2 >>> im.set_data(xcenters, ycenters, H) >>> ax.images.append(im) >>> plt.show() It is also possible to construct a 2-D histogram without specifying bin edges: >>> # Generate non-symmetric test data >>> n = 10000 >>> x = np.linspace(1, 100, n) >>> y = 2*np.log(x) + np.random.rand(n) - 0.5 >>> # Compute 2d histogram. Note the order of x/y and xedges/yedges >>> H, yedges, xedges = np.histogram2d(y, x, bins=20) Now we can plot the histogram using :func:`pcolormesh <matplotlib.pyplot.pcolormesh>`, and a :func:`hexbin <matplotlib.pyplot.hexbin>` for comparison. >>> # Plot histogram using pcolormesh >>> fig, (ax1, ax2) = plt.subplots(ncols=2, sharey=True) >>> ax1.pcolormesh(xedges, yedges, H, cmap='rainbow') >>> ax1.plot(x, 2*np.log(x), 'k-') >>> ax1.set_xlim(x.min(), x.max()) >>> ax1.set_ylim(y.min(), y.max()) >>> ax1.set_xlabel('x') >>> ax1.set_ylabel('y') >>> ax1.set_title('histogram2d') >>> ax1.grid() >>> # Create hexbin plot for comparison >>> ax2.hexbin(x, y, gridsize=20, cmap='rainbow') >>> ax2.plot(x, 2*np.log(x), 'k-') >>> ax2.set_title('hexbin') >>> ax2.set_xlim(x.min(), x.max()) >>> ax2.set_xlabel('x') >>> ax2.grid() >>> plt.show()
1,251
def available(self) -> bool: return self._device is not None and self._device.profile_device.available
Device is available when we have a connection to it.
10
11
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def available(self) -> bool: return self._device is not None and self._device.profile_device.available ``` ###Assistant : Device is available when we have a connection to it.
1,252
def _get_input_from_iterator(iterator, model): next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. if len(tf.nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(tf.nest.flatten(next_element)) == ( len(model.inputs) + len(model.outputs) ): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights ) return x, y, sample_weights
Get elements from the iterator and verify the input shape and type.
12
115
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_input_from_iterator(iterator, model): next_element = iterator.get_next() # `len(nest.flatten(x))` is going to not count empty elements such as {}. # len(nest.flatten([[0,1,2], {}])) is 3 and not 4. The `next_element` is # going to get flattened in `_prepare_feed_values` to work around that. Empty # elements are going to get filtered out as part of the flattening. if len(tf.nest.flatten(next_element)) == len(model.inputs): x = next_element y = None sample_weights = None elif len(tf.nest.flatten(next_element)) == ( len(model.inputs) + len(model.outputs) ): x, y = next_element sample_weights = None else: x, y, sample_weights = next_element # Validate that all the elements in x and y are of the same type and shape. validate_distributed_dataset_inputs( model._distribution_strategy, x, y, sample_weights ) return x, y, sample_weights ``` ###Assistant : Get elements from the iterator and verify the input shape and type.
1,253
def inner(a, b): return (a, b) @array_function_from_c_func_and_dispatcher(_multiarray_umath.where)
inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]])
260
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def inner(a, b): return (a, b) @array_function_from_c_func_and_dispatcher(_multiarray_umath.where) ``` ###Assistant : inner(a, b, /) Inner product of two arrays. Ordinary inner product of vectors for 1-D arrays (without complex conjugation), in higher dimensions a sum product over the last axes. Parameters ---------- a, b : array_like If `a` and `b` are nonscalar, their last dimensions must match. Returns ------- out : ndarray If `a` and `b` are both scalars or both 1-D arrays then a scalar is returned; otherwise an array is returned. ``out.shape = (*a.shape[:-1], *b.shape[:-1])`` Raises ------ ValueError If both `a` and `b` are nonscalar and their last dimensions have different sizes. See Also -------- tensordot : Sum products over arbitrary axes. dot : Generalised matrix product, using second last dimension of `b`. einsum : Einstein summation convention. Notes ----- For vectors (1-D arrays) it computes the ordinary inner-product:: np.inner(a, b) = sum(a[:]*b[:]) More generally, if ``ndim(a) = r > 0`` and ``ndim(b) = s > 0``:: np.inner(a, b) = np.tensordot(a, b, axes=(-1,-1)) or explicitly:: np.inner(a, b)[i0,...,ir-2,j0,...,js-2] = sum(a[i0,...,ir-2,:]*b[j0,...,js-2,:]) In addition `a` or `b` may be scalars, in which case:: np.inner(a,b) = a*b Examples -------- Ordinary inner product for vectors: >>> a = np.array([1,2,3]) >>> b = np.array([0,1,0]) >>> np.inner(a, b) 2 Some multidimensional examples: >>> a = np.arange(24).reshape((2,3,4)) >>> b = np.arange(4) >>> c = np.inner(a, b) >>> c.shape (2, 3) >>> c array([[ 14, 38, 62], [ 86, 110, 134]]) >>> a = np.arange(2).reshape((1,1,2)) >>> b = np.arange(6).reshape((3,2)) >>> c = np.inner(a, b) >>> c.shape (1, 1, 3) >>> c array([[[1, 3, 5]]]) An example where `b` is a scalar: >>> np.inner(np.eye(2), 7) array([[7., 0.], [0., 7.]])
1,254
def contains(self, mouseevent): inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info if not self.get_visible(): return False, {} pickradius = ( float(self._picker) if isinstance(self._picker, Number) and self._picker is not True # the bool, not just nonzero or 1 else self._pickradius) if self.axes: self.axes._unstale_viewLim() transform, offset_trf, offsets, paths = self._prepare_points() # Tests if the point is contained on one of the polygons formed # by the control points of each of the paths. A point is considered # "on" a path if it would lie within a stroke of width 2*pickradius # following the path. If pickradius <= 0, then we instead simply check # if the point is *inside* of the path instead. ind = _path.point_in_path_collection( mouseevent.x, mouseevent.y, pickradius, transform.frozen(), paths, self.get_transforms(), offsets, offset_trf, pickradius <= 0) return len(ind) > 0, dict(ind=ind)
Test whether the mouse event occurred in the collection. Returns ``bool, dict(ind=itemlist)``, where every item in itemlist contains the event.
20
135
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def contains(self, mouseevent): inside, info = self._default_contains(mouseevent) if inside is not None: return inside, info if not self.get_visible(): return False, {} pickradius = ( float(self._picker) if isinstance(self._picker, Number) and self._picker is not True # the bool, not just nonzero or 1 else self._pickradius) if self.axes: self.axes._unstale_viewLim() transform, offset_trf, offsets, paths = self._prepare_points() # Tests if the point is contained on one of the polygons formed # by the control points of each of the paths. A point is considered # "on" a path if it would lie within a stroke of width 2*pickradius # following the path. If pickradius <= 0, then we instead simply check # if the point is *inside* of the path instead. ind = _path.point_in_path_collection( mouseevent.x, mouseevent.y, pickradius, transform.frozen(), paths, self.get_transforms(), offsets, offset_trf, pickradius <= 0) return len(ind) > 0, dict(ind=ind) ``` ###Assistant : Test whether the mouse event occurred in the collection. Returns ``bool, dict(ind=itemlist)``, where every item in itemlist contains the event.
1,255
def deployments(self) -> List[Dict]: return [ { 'name': self.name, 'head_host': self.head_host, 'head_port_in': self.head_port_in, } ]
Get deployments of the deployment. The BaseDeployment just gives one deployment. :return: list of deployments
15
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def deployments(self) -> List[Dict]: return [ { 'name': self.name, 'head_host': self.head_host, 'head_port_in': self.head_port_in, } ] ``` ###Assistant : Get deployments of the deployment. The BaseDeployment just gives one deployment. :return: list of deployments
1,256
def parse_content_disposition(reply): is_inline = True filename = None content_disposition_header = b'Content-Disposition' # First check if the Content-Disposition header has a filename # attribute. if reply.hasRawHeader(content_disposition_header): # We use the unsafe variant of the filename as we sanitize it via # os.path.basename later. try: value = bytes(reply.rawHeader(content_disposition_header)) log.network.debug(f"Parsing Content-Disposition: {value!r}") content_disposition = ContentDisposition.parse(value) filename = content_disposition.filename() except ContentDispositionError as e: log.network.error(f"Error while parsing filename: {e}") else: is_inline = content_disposition.is_inline() # Then try to get filename from url if not filename: filename = reply.url().path().rstrip('/') # If that fails as well, use a fallback if not filename: filename = 'qutebrowser-download' return is_inline, os.path.basename(filename)
Parse a content_disposition header. Args: reply: The QNetworkReply to get a filename for. Return: A (is_inline, filename) tuple.
18
100
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def parse_content_disposition(reply): is_inline = True filename = None content_disposition_header = b'Content-Disposition' # First check if the Content-Disposition header has a filename # attribute. if reply.hasRawHeader(content_disposition_header): # We use the unsafe variant of the filename as we sanitize it via # os.path.basename later. try: value = bytes(reply.rawHeader(content_disposition_header)) log.network.debug(f"Parsing Content-Disposition: {value!r}") content_disposition = ContentDisposition.parse(value) filename = content_disposition.filename() except ContentDispositionError as e: log.network.error(f"Error while parsing filename: {e}") else: is_inline = content_disposition.is_inline() # Then try to get filename from url if not filename: filename = reply.url().path().rstrip('/') # If that fails as well, use a fallback if not filename: filename = 'qutebrowser-download' return is_inline, os.path.basename(filename) ``` ###Assistant : Parse a content_disposition header. Args: reply: The QNetworkReply to get a filename for. Return: A (is_inline, filename) tuple.
1,257
def _get_categorical_mapping(self, scale, data): levels = categorical_order(data, scale.order) n = len(levels) values = scale.values if isinstance(values, dict): self._check_dict_entries(levels, values) # TODO where to ensure that dict values have consistent representation? colors = [values[x] for x in levels] elif isinstance(values, list): colors = self._check_list_length(levels, scale.values) elif isinstance(values, tuple): colors = blend_palette(values, n) elif isinstance(values, str): colors = color_palette(values, n) elif values is None: if n <= len(get_color_cycle()): # Use current (global) default palette colors = color_palette(n_colors=n) else: colors = color_palette("husl", n) else: scale_class = scale.__class__.__name__ msg = " ".join([ f"Scale values for {self.variable} with a {scale_class} mapping", f"must be string, list, tuple, or dict; not {type(scale.values)}." ]) raise TypeError(msg) # If color specified here has alpha channel, it will override alpha property colors = self._standardize_colors(colors)
Define mapping as lookup in list of discrete color values.
10
124
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_categorical_mapping(self, scale, data): levels = categorical_order(data, scale.order) n = len(levels) values = scale.values if isinstance(values, dict): self._check_dict_entries(levels, values) # TODO where to ensure that dict values have consistent representation? colors = [values[x] for x in levels] elif isinstance(values, list): colors = self._check_list_length(levels, scale.values) elif isinstance(values, tuple): colors = blend_palette(values, n) elif isinstance(values, str): colors = color_palette(values, n) elif values is None: if n <= len(get_color_cycle()): # Use current (global) default palette colors = color_palette(n_colors=n) else: colors = color_palette("husl", n) else: scale_class = scale.__class__.__name__ msg = " ".join([ f"Scale values for {self.variable} with a {scale_class} mapping", f"must be string, list, tuple, or dict; not {type(scale.values)}." ]) raise TypeError(msg) # If color specified here has alpha channel, it will override alpha property colors = self._standardize_colors(colors) ``` ###Assistant : Define mapping as lookup in list of discrete color values.
1,258
def minorlocator(self, loc): self._long_axis().set_minor_locator(loc) self._minorlocator = loc
Set minor locator being used for colorbar
7
7
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def minorlocator(self, loc): self._long_axis().set_minor_locator(loc) self._minorlocator = loc ``` ###Assistant : Set minor locator being used for colorbar
1,259
def test_process_datetime_to_timestamp_freeze_time(time_zone, hass): hass.config.set_time_zone(time_zone) utc_now = dt_util.utcnow() with freeze_time(utc_now): epoch = utc_now.timestamp() assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch now = dt_util.now() assert process_datetime_to_timestamp(now) == epoch @pytest.mark.parametrize( "time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"] )
Test we can handle processing database datetimes to timestamps. This test freezes time to make sure everything matches.
18
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_process_datetime_to_timestamp_freeze_time(time_zone, hass): hass.config.set_time_zone(time_zone) utc_now = dt_util.utcnow() with freeze_time(utc_now): epoch = utc_now.timestamp() assert process_datetime_to_timestamp(dt_util.utcnow()) == epoch now = dt_util.now() assert process_datetime_to_timestamp(now) == epoch @pytest.mark.parametrize( "time_zone", ["Europe/Berlin", "America/Chicago", "US/Hawaii", "UTC"] ) ``` ###Assistant : Test we can handle processing database datetimes to timestamps. This test freezes time to make sure everything matches.
1,260
def _spatially_filter(self) -> np.ndarray: logger.debug("Spatially Filter") assert self._shapes_model is not None landmarks_norm = self._normalized["landmarks"] # Convert to matrix form landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T # Project onto shapes model and reconstruct landmarks_norm_table_rec = self._shapes_model.inverse_transform( self._shapes_model.transform(landmarks_norm_table)) # Convert back to shapes (numKeypoint, num_dims, numFrames) landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T, [68, 2, landmarks_norm.shape[2]]) # Transform back to image co-ordinates retval = self._normalized_to_original(landmarks_norm_rec, self._normalized["scale_factors"], self._normalized["mean_coords"]) logger.debug("Spatially Filtered: %s", retval) return retval
interpret the shapes using our shape model (project and reconstruct) Returns ------- :class:`numpy.ndarray` The filtered landmarks in original coordinate space
20
68
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _spatially_filter(self) -> np.ndarray: logger.debug("Spatially Filter") assert self._shapes_model is not None landmarks_norm = self._normalized["landmarks"] # Convert to matrix form landmarks_norm_table = np.reshape(landmarks_norm, [68 * 2, landmarks_norm.shape[2]]).T # Project onto shapes model and reconstruct landmarks_norm_table_rec = self._shapes_model.inverse_transform( self._shapes_model.transform(landmarks_norm_table)) # Convert back to shapes (numKeypoint, num_dims, numFrames) landmarks_norm_rec = np.reshape(landmarks_norm_table_rec.T, [68, 2, landmarks_norm.shape[2]]) # Transform back to image co-ordinates retval = self._normalized_to_original(landmarks_norm_rec, self._normalized["scale_factors"], self._normalized["mean_coords"]) logger.debug("Spatially Filtered: %s", retval) return retval ``` ###Assistant : interpret the shapes using our shape model (project and reconstruct) Returns ------- :class:`numpy.ndarray` The filtered landmarks in original coordinate space
1,261
def letter_form(self): return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) for i, j in self.array_form]))
The letter representation of a ``FreeGroupElement`` is a tuple of generator symbols, with each entry corresponding to a group generator. Inverses of the generators are represented by negative generator symbols. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b, c, d = free_group("a b c d") >>> (a**3).letter_form (a, a, a) >>> (a**2*d**-2*a*b**-4).letter_form (a, a, -d, -d, a, -b, -b, -b, -b) >>> (a**-2*b**3*d).letter_form (-a, -a, b, b, b, d) See Also ======== array_form
76
15
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def letter_form(self): return tuple(flatten([(i,)*j if j > 0 else (-i,)*(-j) for i, j in self.array_form])) ``` ###Assistant : The letter representation of a ``FreeGroupElement`` is a tuple of generator symbols, with each entry corresponding to a group generator. Inverses of the generators are represented by negative generator symbols. Examples ======== >>> from sympy.combinatorics import free_group >>> f, a, b, c, d = free_group("a b c d") >>> (a**3).letter_form (a, a, a) >>> (a**2*d**-2*a*b**-4).letter_form (a, a, -d, -d, a, -b, -b, -b, -b) >>> (a**-2*b**3*d).letter_form (-a, -a, b, b, b, d) See Also ======== array_form
1,262
def _handle_default_message(self, type, data): logger.debug(f"Received message from Leader of type {type}: {data}")
Default leader message handler, just logs it. We should never have to run this unless the leader sends us some weird message.
22
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _handle_default_message(self, type, data): logger.debug(f"Received message from Leader of type {type}: {data}") ``` ###Assistant : Default leader message handler, just logs it. We should never have to run this unless the leader sends us some weird message.
1,263
def export_probs(self) -> dict[str, Any]: result = {} for module in self.nas_modules: try: result.update(module.export_probs(memo=result)) except NotImplementedError: warnings.warn( 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.', UserWarning ) return result
Export the probability of every choice in the search space being chosen. .. note:: If some modules do not implement this method, they are simply ignored. Returns ------- dict In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and the choice name. Values are the probability / logits depending on the implementation.
55
37
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def export_probs(self) -> dict[str, Any]: result = {} for module in self.nas_modules: try: result.update(module.export_probs(memo=result)) except NotImplementedError: warnings.warn( 'Some super-modules you have used did not implement export_probs. You might find some logs are missing.', UserWarning ) return result ``` ###Assistant : Export the probability of every choice in the search space being chosen. .. note:: If some modules do not implement this method, they are simply ignored. Returns ------- dict In most cases, keys are names of ``nas_modules`` suffixed with ``/`` and the choice name. Values are the probability / logits depending on the implementation.
1,264
def debounce_update_release_health_data(organization, project_ids): # Figure out which projects need to get updates from the snuba. should_update = {} cache_keys = ["debounce-health:%d" % id for id in project_ids] cache_data = cache.get_many(cache_keys) for project_id, cache_key in zip(project_ids, cache_keys): if cache_data.get(cache_key) is None: should_update[project_id] = cache_key if not should_update: return projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())} # This gives us updates for all release-projects which have seen new # health data over the last days. It will miss releases where the last # date is longer than what `get_changed_project_release_model_adoptions` # considers recent. project_releases = release_health.get_changed_project_release_model_adoptions( should_update.keys() ) # Check which we already have rows for. existing = set( ReleaseProject.objects.filter( project_id__in=[x[0] for x in project_releases], release__version__in=[x[1] for x in project_releases], ).values_list("project_id", "release__version") ) to_upsert = [] for key in project_releases: if key not in existing: to_upsert.append(key) if to_upsert: dates = release_health.get_oldest_health_data_for_releases(to_upsert) for project_id, version in to_upsert: project = projects.get(project_id) if project is None: # should not happen continue # Ignore versions that were saved with an empty string before validation was added if not Release.is_valid_version(version): continue # We might have never observed the release. This for instance can # happen if the release only had health data so far. For these cases # we want to create the release the first time we observed it on the # health side. release = Release.get_or_create( project=project, version=version, date_added=dates.get((project_id, version)) ) # Make sure that the release knows about this project. Like we had before # the project might not have been associated with this release yet. release.add_project(project) # Debounce updates for a minute cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60) @region_silo_endpoint
This causes a flush of snuba health data to the postgres tables once per minute for the given projects.
19
265
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def debounce_update_release_health_data(organization, project_ids): # Figure out which projects need to get updates from the snuba. should_update = {} cache_keys = ["debounce-health:%d" % id for id in project_ids] cache_data = cache.get_many(cache_keys) for project_id, cache_key in zip(project_ids, cache_keys): if cache_data.get(cache_key) is None: should_update[project_id] = cache_key if not should_update: return projects = {p.id: p for p in Project.objects.get_many_from_cache(should_update.keys())} # This gives us updates for all release-projects which have seen new # health data over the last days. It will miss releases where the last # date is longer than what `get_changed_project_release_model_adoptions` # considers recent. project_releases = release_health.get_changed_project_release_model_adoptions( should_update.keys() ) # Check which we already have rows for. existing = set( ReleaseProject.objects.filter( project_id__in=[x[0] for x in project_releases], release__version__in=[x[1] for x in project_releases], ).values_list("project_id", "release__version") ) to_upsert = [] for key in project_releases: if key not in existing: to_upsert.append(key) if to_upsert: dates = release_health.get_oldest_health_data_for_releases(to_upsert) for project_id, version in to_upsert: project = projects.get(project_id) if project is None: # should not happen continue # Ignore versions that were saved with an empty string before validation was added if not Release.is_valid_version(version): continue # We might have never observed the release. This for instance can # happen if the release only had health data so far. For these cases # we want to create the release the first time we observed it on the # health side. release = Release.get_or_create( project=project, version=version, date_added=dates.get((project_id, version)) ) # Make sure that the release knows about this project. Like we had before # the project might not have been associated with this release yet. release.add_project(project) # Debounce updates for a minute cache.set_many(dict(zip(should_update.values(), [True] * len(should_update))), 60) @region_silo_endpoint ``` ###Assistant : This causes a flush of snuba health data to the postgres tables once per minute for the given projects.
1,265
def show_code(co, *, file=None): print(code_info(co), file=file) _Instruction = collections.namedtuple("_Instruction", "opname opcode arg argval argrepr offset starts_line is_jump_target") _Instruction.opname.__doc__ = "Human readable name for operation" _Instruction.opcode.__doc__ = "Numeric code for operation" _Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None" _Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg" _Instruction.argrepr.__doc__ = "Human readable description of operation argument" _Instruction.offset.__doc__ = "Start index of operation within bytecode sequence" _Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None" _Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False" _OPNAME_WIDTH = 20 _OPARG_WIDTH = 5
Print details of methods, functions, or code to *file*. If *file* is not provided, the output is printed on stdout.
20
96
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def show_code(co, *, file=None): print(code_info(co), file=file) _Instruction = collections.namedtuple("_Instruction", "opname opcode arg argval argrepr offset starts_line is_jump_target") _Instruction.opname.__doc__ = "Human readable name for operation" _Instruction.opcode.__doc__ = "Numeric code for operation" _Instruction.arg.__doc__ = "Numeric argument to operation (if any), otherwise None" _Instruction.argval.__doc__ = "Resolved arg value (if known), otherwise same as arg" _Instruction.argrepr.__doc__ = "Human readable description of operation argument" _Instruction.offset.__doc__ = "Start index of operation within bytecode sequence" _Instruction.starts_line.__doc__ = "Line started by this opcode (if any), otherwise None" _Instruction.is_jump_target.__doc__ = "True if other code jumps to here, otherwise False" _OPNAME_WIDTH = 20 _OPARG_WIDTH = 5 ``` ###Assistant : Print details of methods, functions, or code to *file*. If *file* is not provided, the output is printed on stdout.
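A minimal sketch of calling the public `dis.show_code` helper documented above; the `add` function is a made-up example, not from the original code.
```Python
import dis

def add(a, b):
    return a + b

# Prints a summary of the underlying code object: name, filename,
# argument count, constants, names, variable names, etc.
dis.show_code(add)
```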
1,266
def set_cmap(cmap): cmap = colormaps[cmap] rc('image', cmap=cmap.name) im = gci() if im is not None: im.set_cmap(cmap) @_copy_docstring_and_deprecators(matplotlib.image.imread)
Set the default colormap, and apply it to the current image if any. Parameters ---------- cmap : `~matplotlib.colors.Colormap` or str A colormap instance or the name of a registered colormap. See Also -------- colormaps matplotlib.cm.register_cmap matplotlib.cm.get_cmap
36
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def set_cmap(cmap): cmap = colormaps[cmap] rc('image', cmap=cmap.name) im = gci() if im is not None: im.set_cmap(cmap) @_copy_docstring_and_deprecators(matplotlib.image.imread) ``` ###Assistant : Set the default colormap, and apply it to the current image if any. Parameters ---------- cmap : `~matplotlib.colors.Colormap` or str A colormap instance or the name of a registered colormap. See Also -------- colormaps matplotlib.cm.register_cmap matplotlib.cm.get_cmap
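A short usage sketch of `plt.set_cmap` through the pyplot interface; the random image is only illustrative.
```Python
import matplotlib.pyplot as plt
import numpy as np

plt.set_cmap("viridis")            # set the default colormap by name
plt.imshow(np.random.rand(8, 8))   # new images pick up the default
plt.colorbar()
plt.show()
```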
1,267
def completion_item_focus(self, which, history=False): if history: if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or not self._active): if which == 'next': self._cmd.command_history_next() return elif which == 'prev': self._cmd.command_history_prev() return else: raise cmdutils.CommandError("Can't combine --history with " "{}!".format(which)) if not self._active: return selmodel = self.selectionModel() indices = { 'next': lambda: self._next_idx(upwards=False), 'prev': lambda: self._next_idx(upwards=True), 'next-category': lambda: self._next_category_idx(upwards=False), 'prev-category': lambda: self._next_category_idx(upwards=True), 'next-page': lambda: self._next_page(upwards=False), 'prev-page': lambda: self._next_page(upwards=True), } idx = indices[which]() if not idx.isValid(): return selmodel.setCurrentIndex( idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows) # if the last item is focused, try to fetch more next_idx = self.indexBelow(idx) if not self.visualRect(next_idx).isValid(): self.expandAll() count = self._model().count() if count == 0: self.hide() elif count == 1 and config.val.completion.quick: self.hide() elif config.val.completion.show == 'auto': self.show()
Shift the focus of the completion menu to another item. Args: which: 'next', 'prev', 'next-category', 'prev-category', 'next-page', or 'prev-page'. history: Navigate through command history if no text was typed.
29
114
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def completion_item_focus(self, which, history=False): if history: if (self._cmd.text() == ':' or self._cmd.history.is_browsing() or not self._active): if which == 'next': self._cmd.command_history_next() return elif which == 'prev': self._cmd.command_history_prev() return else: raise cmdutils.CommandError("Can't combine --history with " "{}!".format(which)) if not self._active: return selmodel = self.selectionModel() indices = { 'next': lambda: self._next_idx(upwards=False), 'prev': lambda: self._next_idx(upwards=True), 'next-category': lambda: self._next_category_idx(upwards=False), 'prev-category': lambda: self._next_category_idx(upwards=True), 'next-page': lambda: self._next_page(upwards=False), 'prev-page': lambda: self._next_page(upwards=True), } idx = indices[which]() if not idx.isValid(): return selmodel.setCurrentIndex( idx, QItemSelectionModel.ClearAndSelect | QItemSelectionModel.Rows) # if the last item is focused, try to fetch more next_idx = self.indexBelow(idx) if not self.visualRect(next_idx).isValid(): self.expandAll() count = self._model().count() if count == 0: self.hide() elif count == 1 and config.val.completion.quick: self.hide() elif config.val.completion.show == 'auto': self.show() ``` ###Assistant : Shift the focus of the completion menu to another item. Args: which: 'next', 'prev', 'next-category', 'prev-category', 'next-page', or 'prev-page'. history: Navigate through command history if no text was typed.
1,268
def _background_extract(self, output_folder, progress_queue): _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True), loader=ImagesLoader(self._input_location, count=self._alignments.frames_count)) for frame_idx, (filename, image) in enumerate(_io["loader"].load()): logger.trace("Outputting frame: %s: %s", frame_idx, filename) src_filename = os.path.basename(filename) frame_name = os.path.splitext(src_filename)[0] progress_queue.put(1) for face_idx, face in enumerate(self._frame_faces[frame_idx]): output = f"{frame_name}_{face_idx}.png" aligned = AlignedFace(face.landmarks_xy, image=image, centering="head", size=512) # TODO user selectable size meta = dict(alignments=face.to_png_meta(), source=dict(alignments_version=self._alignments.version, original_filename=output, face_index=face_idx, source_filename=src_filename, source_is_video=self._globals.is_video, source_frame_dims=image.shape[:2])) b_image = encode_image(aligned.face, ".png", metadata=meta) _io["saver"].save(output, b_image) _io["saver"].close()
Perform the background extraction in a thread so GUI doesn't become unresponsive. Parameters ---------- output_folder: str The location to save the output faces to progress_queue: :class:`queue.Queue` The queue to place incremental counts to for updating the GUI's progress bar
39
65
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _background_extract(self, output_folder, progress_queue): _io = dict(saver=ImagesSaver(get_folder(output_folder), as_bytes=True), loader=ImagesLoader(self._input_location, count=self._alignments.frames_count)) for frame_idx, (filename, image) in enumerate(_io["loader"].load()): logger.trace("Outputting frame: %s: %s", frame_idx, filename) src_filename = os.path.basename(filename) frame_name = os.path.splitext(src_filename)[0] progress_queue.put(1) for face_idx, face in enumerate(self._frame_faces[frame_idx]): output = f"{frame_name}_{face_idx}.png" aligned = AlignedFace(face.landmarks_xy, image=image, centering="head", size=512) # TODO user selectable size meta = dict(alignments=face.to_png_meta(), source=dict(alignments_version=self._alignments.version, original_filename=output, face_index=face_idx, source_filename=src_filename, source_is_video=self._globals.is_video, source_frame_dims=image.shape[:2])) b_image = encode_image(aligned.face, ".png", metadata=meta) _io["saver"].save(output, b_image) _io["saver"].close() ``` ###Assistant : Perform the background extraction in a thread so GUI doesn't become unresponsive. Parameters ---------- output_folder: str The location to save the output faces to progress_queue: :class:`queue.Queue` The queue to place incremental counts to for updating the GUI's progress bar
1,269
def test_multi_part_language(self, m): m.return_value = ["chi_sim", "eng"] msgs = check_default_language_available(None) self.assertEqual(len(msgs), 0)
GIVEN: - An OCR language which is multi part (ie chi-sim) - The language is correctly formatted WHEN: - Installed packages are checked THEN: - No errors are reported
29
12
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_multi_part_language(self, m): m.return_value = ["chi_sim", "eng"] msgs = check_default_language_available(None) self.assertEqual(len(msgs), 0) ``` ###Assistant : GIVEN: - An OCR language which is multi part (ie chi-sim) - The language is correctly formatted WHEN: - Installed packages are checked THEN: - No errors are reported
1,270
def finished_callback(self, runner_obj): event_data = { 'event': 'EOF', 'final_counter': self.event_ct, 'guid': self.guid, } event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data)
Ansible runner callback triggered on finished run
7
16
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def finished_callback(self, runner_obj): event_data = { 'event': 'EOF', 'final_counter': self.event_ct, 'guid': self.guid, } event_data.setdefault(self.event_data_key, self.instance.id) self.dispatcher.dispatch(event_data) ``` ###Assistant : Ansible runner callback triggered on finished run
1,271
def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {"data": "result"} sensitive_fields = {"Product": {"name"}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result["data"] == MASK @pytest.mark.parametrize( "sensitive_fields", [ {"NonExistingType": {}}, {"Product": {"nonExistingField"}}, {"Node": {"id"}}, ], )
fragment ProductFragment on Product { id name } query products($first: Int){ products(channel: "channel-pln", first:$first){ edges{ node{ ... ProductFragment variants { variantName: name } } } } }
27
33
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_anonymize_gql_operation_response_with_fragment_spread(gql_operation_factory): query = result = {"data": "result"} sensitive_fields = {"Product": {"name"}} operation_result = gql_operation_factory(query, result=result) anonymize_gql_operation_response(operation_result, sensitive_fields) assert operation_result.result["data"] == MASK @pytest.mark.parametrize( "sensitive_fields", [ {"NonExistingType": {}}, {"Product": {"nonExistingField"}}, {"Node": {"id"}}, ], ) ``` ###Assistant : fragment ProductFragment on Product { id name } query products($first: Int){ products(channel: "channel-pln", first:$first){ edges{ node{ ... ProductFragment variants { variantName: name } } } } }
1,272
def complete_bipartite_graph(n1, n2, create_using=None): G = nx.empty_graph(0, create_using) if G.is_directed(): raise nx.NetworkXError("Directed Graph not supported") n1, top = n1 n2, bottom = n2 if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): bottom = [n1 + i for i in bottom] G.add_nodes_from(top, bipartite=0) G.add_nodes_from(bottom, bipartite=1) if len(G) != len(top) + len(bottom): raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes") G.add_edges_from((u, v) for u in top for v in bottom) G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})" return G @py_random_state(3)
Returns the complete bipartite graph `K_{n_1,n_2}`. The graph is composed of two partitions with nodes 0 to (n1 - 1) in the first and nodes n1 to (n1 + n2 - 1) in the second. Each node in the first is connected to each node in the second. Parameters ---------- n1, n2 : integer or iterable container of nodes If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. If a container, the elements are the nodes. create_using : NetworkX graph instance, (default: nx.Graph) Return graph of this type. Notes ----- Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are containers of nodes. If only one of n1 or n2 are integers, that integer is replaced by `range` of that integer. The nodes are assigned the attribute 'bipartite' with the value 0 or 1 to indicate which bipartite set the node belongs to. This function is not imported in the main namespace. To use it use nx.bipartite.complete_bipartite_graph
166
74
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def complete_bipartite_graph(n1, n2, create_using=None): G = nx.empty_graph(0, create_using) if G.is_directed(): raise nx.NetworkXError("Directed Graph not supported") n1, top = n1 n2, bottom = n2 if isinstance(n1, numbers.Integral) and isinstance(n2, numbers.Integral): bottom = [n1 + i for i in bottom] G.add_nodes_from(top, bipartite=0) G.add_nodes_from(bottom, bipartite=1) if len(G) != len(top) + len(bottom): raise nx.NetworkXError("Inputs n1 and n2 must contain distinct nodes") G.add_edges_from((u, v) for u in top for v in bottom) G.graph["name"] = f"complete_bipartite_graph({n1}, {n2})" return G @py_random_state(3) ``` ###Assistant : Returns the complete bipartite graph `K_{n_1,n_2}`. The graph is composed of two partitions with nodes 0 to (n1 - 1) in the first and nodes n1 to (n1 + n2 - 1) in the second. Each node in the first is connected to each node in the second. Parameters ---------- n1, n2 : integer or iterable container of nodes If integers, nodes are from `range(n1)` and `range(n1, n1 + n2)`. If a container, the elements are the nodes. create_using : NetworkX graph instance, (default: nx.Graph) Return graph of this type. Notes ----- Nodes are the integers 0 to `n1 + n2 - 1` unless either n1 or n2 are containers of nodes. If only one of n1 or n2 are integers, that integer is replaced by `range` of that integer. The nodes are assigned the attribute 'bipartite' with the value 0 or 1 to indicate which bipartite set the node belongs to. This function is not imported in the main namespace. To use it use nx.bipartite.complete_bipartite_graph
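A hedged usage sketch following the docstring's note that the function is reached through the bipartite namespace; the graph sizes are arbitrary.
```Python
import networkx as nx

G = nx.bipartite.complete_bipartite_graph(3, 2)    # nodes 0-2 in one set, 3-4 in the other
top = [n for n, d in G.nodes(data=True) if d["bipartite"] == 0]
print(sorted(top), G.number_of_edges())           # [0, 1, 2] 6
```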
1,273
def shutdown(self, callback=None): if self._state == _UNWRAPPED: raise RuntimeError('no security layer present') if self._state == _SHUTDOWN: raise RuntimeError('shutdown in progress') assert self._state in (_WRAPPED, _DO_HANDSHAKE) self._state = _SHUTDOWN self._shutdown_cb = callback ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] return ssldata
Start the SSL shutdown sequence. Return a list of ssldata. A ssldata element is a list of buffers The optional *callback* argument can be used to install a callback that will be called when the shutdown is complete. The callback will be called without arguments.
45
45
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def shutdown(self, callback=None): if self._state == _UNWRAPPED: raise RuntimeError('no security layer present') if self._state == _SHUTDOWN: raise RuntimeError('shutdown in progress') assert self._state in (_WRAPPED, _DO_HANDSHAKE) self._state = _SHUTDOWN self._shutdown_cb = callback ssldata, appdata = self.feed_ssldata(b'') assert appdata == [] or appdata == [b''] return ssldata ``` ###Assistant : Start the SSL shutdown sequence. Return a list of ssldata. A ssldata element is a list of buffers The optional *callback* argument can be used to install a callback that will be called when the shutdown is complete. The callback will be called without arguments.
1,274
def preferred_ip(vm_, ips): proto = config.get_cloud_config_value( "protocol", vm_, __opts__, default="ipv4", search_global=False ) family = socket.AF_INET if proto == "ipv6": family = socket.AF_INET6 for ip in ips: ignore_ip = ignore_cidr(vm_, ip) if ignore_ip: continue try: socket.inet_pton(family, ip) return ip except Exception: # pylint: disable=broad-except continue return False
Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option. The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses.
27
46
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def preferred_ip(vm_, ips): proto = config.get_cloud_config_value( "protocol", vm_, __opts__, default="ipv4", search_global=False ) family = socket.AF_INET if proto == "ipv6": family = socket.AF_INET6 for ip in ips: ignore_ip = ignore_cidr(vm_, ip) if ignore_ip: continue try: socket.inet_pton(family, ip) return ip except Exception: # pylint: disable=broad-except continue return False ``` ###Assistant : Return either an 'ipv4' (default) or 'ipv6' address depending on 'protocol' option. The list of 'ipv4' IPs is filtered by ignore_cidr() to remove any unreachable private addresses.
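The address check above hinges on `socket.inet_pton` rejecting strings of the wrong family; a standalone sketch of that idea (the `is_valid` helper name is made up for illustration).
```Python
import socket

def is_valid(ip, proto="ipv4"):
    # mirror the family selection used in preferred_ip
    family = socket.AF_INET6 if proto == "ipv6" else socket.AF_INET
    try:
        socket.inet_pton(family, ip)
        return True
    except OSError:
        return False

print(is_valid("192.0.2.1"))     # True
print(is_valid("::1", "ipv6"))   # True
print(is_valid("not-an-ip"))     # False
```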
1,275
def get_parent_account(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( % ("%s", searchfield, "%s", "%s", "%s"), (filters["company"], "%%%s%%" % txt, page_len, start), as_list=1, )
select name from tabAccount where is_group = 1 and docstatus != 2 and company = %s and %s like %s order by name limit %s offset %s
27
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_parent_account(doctype, txt, searchfield, start, page_len, filters): return frappe.db.sql( % ("%s", searchfield, "%s", "%s", "%s"), (filters["company"], "%%%s%%" % txt, page_len, start), as_list=1, ) ``` ###Assistant : select name from tabAccount where is_group = 1 and docstatus != 2 and company = %s and %s like %s order by name limit %s offset %s
1,276
def exact_laplacian_kernel(x, y, stddev): x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2) return tf.exp(-diff_l1_norm / stddev)
Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible.
195
19
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def exact_laplacian_kernel(x, y, stddev): x_aligned, y_aligned = _align_matrices(x, y) diff_l1_norm = tf.reduce_sum(tf.abs(tf.subtract(x_aligned, y_aligned)), 2) return tf.exp(-diff_l1_norm / stddev) ``` ###Assistant : Computes exact Laplacian kernel value(s) for tensors x and y using stddev. The Laplacian kernel for vectors u, v is defined as follows: K(u, v) = exp(-||u-v|| / stddev) where the norm is the l1-norm. x, y can be either vectors or matrices. If they are vectors, they must have the same dimension. If they are matrices, they must have the same number of columns. In the latter case, the method returns (as a matrix) K(u, v) values for all pairs (u, v) where u is a row from x and v is a row from y. Args: x: a tensor of rank 1 or 2. Its shape should be either [dim] or [m, dim]. y: a tensor of rank 1 or 2. Its shape should be either [dim] or [n, dim]. stddev: The width of the Gaussian kernel. Returns: A single value (scalar) with shape (1, 1) if x, y are vectors or a matrix of shape (m, n) with entries K(u, v) (where K is the Laplacian kernel) for all (u,v) pairs where u, v are rows from x and y respectively. Raises: ValueError: if the shapes of x, y are not compatible.
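A NumPy check of the kernel definition K(u, v) = exp(-||u - v||_1 / stddev) from the docstring; the vectors are arbitrary illustrative values.
```Python
import numpy as np

u = np.array([1.0, 2.0, 3.0])
v = np.array([0.0, 2.0, 5.0])
stddev = 2.0
# l1 distance is |1-0| + |2-2| + |3-5| = 3
k = np.exp(-np.sum(np.abs(u - v)) / stddev)
print(k)   # exp(-1.5) ≈ 0.2231
```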
1,277
def transform(self, X): check_is_fitted(self) if self.n_neighbors is not None: distances, indices = self.nbrs_.kneighbors(X, return_distance=True) else: distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] G_X = np.zeros((n_queries, n_samples_fit)) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X)
Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) X transformed in the new space.
118
93
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def transform(self, X): check_is_fitted(self) if self.n_neighbors is not None: distances, indices = self.nbrs_.kneighbors(X, return_distance=True) else: distances, indices = self.nbrs_.radius_neighbors(X, return_distance=True) # Create the graph of shortest distances from X to # training data via the nearest neighbors of X. # This can be done as a single array operation, but it potentially # takes a lot of memory. To avoid that, use a loop: n_samples_fit = self.nbrs_.n_samples_fit_ n_queries = distances.shape[0] G_X = np.zeros((n_queries, n_samples_fit)) for i in range(n_queries): G_X[i] = np.min(self.dist_matrix_[indices[i]] + distances[i][:, None], 0) G_X **= 2 G_X *= -0.5 return self.kernel_pca_.transform(G_X) ``` ###Assistant : Transform X. This is implemented by linking the points X into the graph of geodesic distances of the training data. First the `n_neighbors` nearest neighbors of X are found in the training data, and from these the shortest geodesic distances from each point in X to each point in the training data are computed in order to construct the kernel. The embedding of X is the projection of this kernel onto the embedding vectors of the training set. Parameters ---------- X : array-like, shape (n_queries, n_features) If neighbors_algorithm='precomputed', X is assumed to be a distance matrix or a sparse graph of shape (n_queries, n_samples_fit). Returns ------- X_new : array-like, shape (n_queries, n_components) X transformed in the new space.
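A hedged sketch of the public scikit-learn API that wraps this `transform`; the digits dataset and the train/query split sizes are illustrative assumptions.
```Python
from sklearn.datasets import load_digits
from sklearn.manifold import Isomap

X, _ = load_digits(return_X_y=True)
iso = Isomap(n_neighbors=5, n_components=2).fit(X[:500])
X_new = iso.transform(X[500:600])   # embed unseen points via the fitted neighbor graph
print(X_new.shape)                  # (100, 2)
```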
1,278
def generate_config_style_dict(self) -> dict[str, str]: keys_converting_dict = { "fill": ("color", "fill_color"), "fill-opacity": ("opacity", "fill_opacity"), "stroke": ("color", "stroke_color"), "stroke-opacity": ("opacity", "stroke_opacity"), "stroke-width": ("stroke_width",), } svg_default_dict = self.svg_default result = {} for svg_key, style_keys in keys_converting_dict.items(): for style_key in style_keys: if svg_default_dict[style_key] is None: continue result[svg_key] = str(svg_default_dict[style_key]) return result
Generate a dictionary holding the default style information.
8
48
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def generate_config_style_dict(self) -> dict[str, str]: keys_converting_dict = { "fill": ("color", "fill_color"), "fill-opacity": ("opacity", "fill_opacity"), "stroke": ("color", "stroke_color"), "stroke-opacity": ("opacity", "stroke_opacity"), "stroke-width": ("stroke_width",), } svg_default_dict = self.svg_default result = {} for svg_key, style_keys in keys_converting_dict.items(): for style_key in style_keys: if svg_default_dict[style_key] is None: continue result[svg_key] = str(svg_default_dict[style_key]) return result ``` ###Assistant : Generate a dictionary holding the default style information.
1,279
def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', python_versions=self.python_versions, ssh_key_type=self.ssh_key.KEY_TYPE, ssh_private_key=self.ssh_key.key_contents, ssh_public_key=self.ssh_key.pub_contents, )
The variables to template in the bootstrapping script.
8
22
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_variables(self): # type: () -> t.Dict[str, t.Union[str, t.List[str]]] return dict( bootstrap_type=self.bootstrap_type, controller='yes' if self.controller else '', python_versions=self.python_versions, ssh_key_type=self.ssh_key.KEY_TYPE, ssh_private_key=self.ssh_key.key_contents, ssh_public_key=self.ssh_key.pub_contents, ) ``` ###Assistant : The variables to template in the bootstrapping script.
1,280
def _create_pseudo_member_(cls, value): pseudo_member = cls._value2member_map_.get(value, None) if pseudo_member is None: # verify all bits are accounted for _, extra_flags = _decompose(cls, value) if extra_flags: raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value # use setdefault in case another thread already created a composite # with this value pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member
Create a composite member iff value contains only members.
9
71
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _create_pseudo_member_(cls, value): pseudo_member = cls._value2member_map_.get(value, None) if pseudo_member is None: # verify all bits are accounted for _, extra_flags = _decompose(cls, value) if extra_flags: raise ValueError("%r is not a valid %s" % (value, cls.__qualname__)) # construct a singleton enum pseudo-member pseudo_member = object.__new__(cls) pseudo_member._name_ = None pseudo_member._value_ = value # use setdefault in case another thread already created a composite # with this value pseudo_member = cls._value2member_map_.setdefault(value, pseudo_member) return pseudo_member ``` ###Assistant : Create a composite member iff value contains only members.
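A small illustration of when `_create_pseudo_member_` fires: combining Flag members yields a value with no named member, so a composite pseudo-member is created and cached on demand. The `Perm` class is hypothetical.
```Python
from enum import Flag, auto

class Perm(Flag):
    READ = auto()
    WRITE = auto()
    EXEC = auto()

combo = Perm.READ | Perm.WRITE   # value 3 has no named member, so a composite is created
print(combo.value)               # 3
print(Perm(3) is combo)          # True: the composite is cached in _value2member_map_
```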
1,281
def _identify_infrequent(self, category_count, n_samples, col_idx): if isinstance(self.min_frequency, numbers.Integral): infrequent_mask = category_count < self.min_frequency elif isinstance(self.min_frequency, numbers.Real): min_frequency_abs = n_samples * self.min_frequency infrequent_mask = category_count < min_frequency_abs else: infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) n_current_features = category_count.size - infrequent_mask.sum() + 1 if self.max_categories is not None and self.max_categories < n_current_features: # stable sort to preserve original count order smallest_levels = np.argsort(category_count, kind="mergesort")[ : -self.max_categories + 1 ] infrequent_mask[smallest_levels] = True output = np.flatnonzero(infrequent_mask) return output if output.size > 0 else None
Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None.
55
78
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _identify_infrequent(self, category_count, n_samples, col_idx): if isinstance(self.min_frequency, numbers.Integral): infrequent_mask = category_count < self.min_frequency elif isinstance(self.min_frequency, numbers.Real): min_frequency_abs = n_samples * self.min_frequency infrequent_mask = category_count < min_frequency_abs else: infrequent_mask = np.zeros(category_count.shape[0], dtype=bool) n_current_features = category_count.size - infrequent_mask.sum() + 1 if self.max_categories is not None and self.max_categories < n_current_features: # stable sort to preserve original count order smallest_levels = np.argsort(category_count, kind="mergesort")[ : -self.max_categories + 1 ] infrequent_mask[smallest_levels] = True output = np.flatnonzero(infrequent_mask) return output if output.size > 0 else None ``` ###Assistant : Compute the infrequent indices. Parameters ---------- category_count : ndarray of shape (n_cardinality,) Category counts. n_samples : int Number of samples. col_idx : int Index of the current category. Only used for the error message. Returns ------- output : ndarray of shape (n_infrequent_categories,) or None If there are infrequent categories, indices of infrequent categories. Otherwise None.
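A hedged sketch of the public `OneHotEncoder` options that drive this helper (assumes scikit-learn >= 1.1, where `min_frequency` was introduced); the toy category counts are illustrative.
```Python
import numpy as np
from sklearn.preprocessing import OneHotEncoder

# 5 x 'a', 20 x 'b', 10 x 'c', 3 x 'd' -> 'a' and 'd' fall below min_frequency=6
X = np.array([["a"] * 5 + ["b"] * 20 + ["c"] * 10 + ["d"] * 3]).T
enc = OneHotEncoder(min_frequency=6).fit(X)
print(enc.infrequent_categories_)   # [array(['a', 'd'], dtype=object)]
```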
1,282
def get_conn(self) -> container_v1.ClusterManagerClient: if self._client is None: credentials = self._get_credentials() self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO) return self._client # To preserve backward compatibility # TODO: remove one day
Returns a ClusterManagerClient object. :rtype: google.cloud.container_v1.ClusterManagerClient
5
27
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_conn(self) -> container_v1.ClusterManagerClient: if self._client is None: credentials = self._get_credentials() self._client = container_v1.ClusterManagerClient(credentials=credentials, client_info=CLIENT_INFO) return self._client # To preserve backward compatibility # TODO: remove one day ``` ###Assistant : Returns a ClusterManagerClient object. :rtype: google.cloud.container_v1.ClusterManagerClient
1,283
def iscoroutinefunction(func): return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', None) is _is_coroutine) # Prioritize native coroutine check to speed-up # asyncio.iscoroutine. _COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, collections.abc.Coroutine, CoroWrapper) _iscoroutine_typecache = set()
Return True if func is a decorated coroutine function.
9
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def iscoroutinefunction(func): return (inspect.iscoroutinefunction(func) or getattr(func, '_is_coroutine', None) is _is_coroutine) # Prioritize native coroutine check to speed-up # asyncio.iscoroutine. _COROUTINE_TYPES = (types.CoroutineType, types.GeneratorType, collections.abc.Coroutine, CoroWrapper) _iscoroutine_typecache = set() ``` ###Assistant : Return True if func is a decorated coroutine function.
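A minimal illustration of the check on a plain coroutine function versus an ordinary callable; the `fetch` coroutine is a made-up example.
```Python
import asyncio

async def fetch():
    return 42

print(asyncio.iscoroutinefunction(fetch))        # True
print(asyncio.iscoroutinefunction(lambda: 42))   # False
```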
1,284
async def async_load(self) -> _T | None: if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task
Load data. If the expected version and minor version do not match the given versions, the migrate function will be invoked with migrate_func(version, minor_version, config). Will ensure that when a call comes in while another one is in progress, the second call will wait and return the result of the first call.
52
17
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def async_load(self) -> _T | None: if self._load_task is None: self._load_task = self.hass.async_create_task(self._async_load()) return await self._load_task ``` ###Assistant : Load data. If the expected version and minor version do not match the given versions, the migrate function will be invoked with migrate_func(version, minor_version, config). Will ensure that when a call comes in while another one is in progress, the second call will wait and return the result of the first call.
1,285
def extra_state_attributes(self) -> dict[str, Any] | None: data = super().extra_state_attributes or {} last_user = self.vera_device.get_last_user_alert() if last_user is not None: data[ATTR_LAST_USER_NAME] = last_user[1] data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert() return data
Who unlocked the lock and did a low battery alert fire. Reports on the previous poll cycle. changed_by_name is a string like 'Bob'. low_battery is 1 if an alert fired, 0 otherwise.
32
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def extra_state_attributes(self) -> dict[str, Any] | None: data = super().extra_state_attributes or {} last_user = self.vera_device.get_last_user_alert() if last_user is not None: data[ATTR_LAST_USER_NAME] = last_user[1] data[ATTR_LOW_BATTERY] = self.vera_device.get_low_battery_alert() return data ``` ###Assistant : Who unlocked the lock and did a low battery alert fire. Reports on the previous poll cycle. changed_by_name is a string like 'Bob'. low_battery is 1 if an alert fired, 0 otherwise.
1,286
def magic(self, arg_s): warnings.warn( "`magic(...)` is deprecated since IPython 0.13 (warning added in " "8.1), use run_line_magic(magic_name, parameter_s).", DeprecationWarning, stacklevel=2, ) # TODO: should we issue a loud deprecation warning here? magic_name, _, magic_arg_s = arg_s.partition(' ') magic_name = magic_name.lstrip(prefilter.ESC_MAGIC) return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2) #------------------------------------------------------------------------- # Things related to macros #-------------------------------------------------------------------------
DEPRECATED Deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s). Call a magic function by name. Input: a string containing the name of the magic function to call and any additional arguments to be passed to the magic. magic('name -opt foo bar') is equivalent to typing at the ipython prompt: In[1]: %name -opt foo bar To call a magic without arguments, simply use magic('name'). This provides a proper Python function to call IPython's magics in any valid Python code you can type at the interpreter, including loops and compound statements.
92
51
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def magic(self, arg_s): warnings.warn( "`magic(...)` is deprecated since IPython 0.13 (warning added in " "8.1), use run_line_magic(magic_name, parameter_s).", DeprecationWarning, stacklevel=2, ) # TODO: should we issue a loud deprecation warning here? magic_name, _, magic_arg_s = arg_s.partition(' ') magic_name = magic_name.lstrip(prefilter.ESC_MAGIC) return self.run_line_magic(magic_name, magic_arg_s, _stack_depth=2) #------------------------------------------------------------------------- # Things related to macros #------------------------------------------------------------------------- ``` ###Assistant : DEPRECATED Deprecated since IPython 0.13 (warning added in 8.1), use run_line_magic(magic_name, parameter_s). Call a magic function by name. Input: a string containing the name of the magic function to call and any additional arguments to be passed to the magic. magic('name -opt foo bar') is equivalent to typing at the ipython prompt: In[1]: %name -opt foo bar To call a magic without arguments, simply use magic('name'). This provides a proper Python function to call IPython's magics in any valid Python code you can type at the interpreter, including loops and compound statements.
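A hedged sketch of the recommended replacement call; it assumes an active IPython session (`get_ipython()` returns None otherwise), and the %timeit magic is only an example.
```Python
from IPython import get_ipython

ip = get_ipython()
if ip is not None:
    # equivalent of the deprecated ip.magic("timeit sum(range(100))")
    ip.run_line_magic("timeit", "sum(range(100))")
```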
1,287
def create_command(name, **kwargs): # type: (str, **Any) -> Command module_path, class_name, summary = commands_dict[name] module = importlib.import_module(module_path) command_class = getattr(module, class_name) command = command_class(name=name, summary=summary, **kwargs) return command
Create an instance of the Command class with the given name.
11
28
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def create_command(name, **kwargs): # type: (str, **Any) -> Command module_path, class_name, summary = commands_dict[name] module = importlib.import_module(module_path) command_class = getattr(module, class_name) command = command_class(name=name, summary=summary, **kwargs) return command ``` ###Assistant : Create an instance of the Command class with the given name.
1,288
def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: block_to_df = cached_remote_fn(_block_to_df) return [block_to_df.remote(block) for block in self._blocks.get_blocks()]
Convert this dataset into a distributed set of Pandas dataframes. This is only supported for datasets convertible to Arrow records. This function induces a copy of the data. For zero-copy access to the underlying data, consider using ``.to_arrow()`` or ``.get_internal_block_refs()``. Time complexity: O(dataset size / parallelism) Returns: A list of remote Pandas dataframes created from this dataset.
57
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def to_pandas_refs(self) -> List[ObjectRef["pandas.DataFrame"]]: block_to_df = cached_remote_fn(_block_to_df) return [block_to_df.remote(block) for block in self._blocks.get_blocks()] ``` ###Assistant : Convert this dataset into a distributed set of Pandas dataframes. This is only supported for datasets convertible to Arrow records. This function induces a copy of the data. For zero-copy access to the underlying data, consider using ``.to_arrow()`` or ``.get_internal_block_refs()``. Time complexity: O(dataset size / parallelism) Returns: A list of remote Pandas dataframes created from this dataset.
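A hedged usage sketch assuming a running Ray cluster and the ray.data API; the dataset size is illustrative.
```Python
import ray

ds = ray.data.range(1000)
refs = ds.to_pandas_refs()          # one ObjectRef per block
dfs = ray.get(refs)                 # materialize the pandas DataFrames locally
print(sum(len(df) for df in dfs))   # 1000
```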
1,289
def hsplit(ary, indices_or_sections): if _nx.ndim(ary) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if ary.ndim > 1: return split(ary, indices_or_sections, 1) else: return split(ary, indices_or_sections, 0) @array_function_dispatch(_hvdsplit_dispatcher)
Split an array into multiple sub-arrays horizontally (column-wise). Please refer to the `split` documentation. `hsplit` is equivalent to `split` with ``axis=1``, the array is always split along the second axis except for 1-D arrays, where it is split at ``axis=0``. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]) >>> np.hsplit(x, 2) [array([[[0., 1.]], [[4., 5.]]]), array([[[2., 3.]], [[6., 7.]]])] With a 1-D array, the split is along axis 0. >>> x = np.array([0, 1, 2, 3, 4, 5]) >>> np.hsplit(x, 2) [array([0, 1, 2]), array([3, 4, 5])]
203
32
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def hsplit(ary, indices_or_sections): if _nx.ndim(ary) == 0: raise ValueError('hsplit only works on arrays of 1 or more dimensions') if ary.ndim > 1: return split(ary, indices_or_sections, 1) else: return split(ary, indices_or_sections, 0) @array_function_dispatch(_hvdsplit_dispatcher) ``` ###Assistant : Split an array into multiple sub-arrays horizontally (column-wise). Please refer to the `split` documentation. `hsplit` is equivalent to `split` with ``axis=1``, the array is always split along the second axis except for 1-D arrays, where it is split at ``axis=0``. See Also -------- split : Split an array into multiple sub-arrays of equal size. Examples -------- >>> x = np.arange(16.0).reshape(4, 4) >>> x array([[ 0., 1., 2., 3.], [ 4., 5., 6., 7.], [ 8., 9., 10., 11.], [12., 13., 14., 15.]]) >>> np.hsplit(x, 2) [array([[ 0., 1.], [ 4., 5.], [ 8., 9.], [12., 13.]]), array([[ 2., 3.], [ 6., 7.], [10., 11.], [14., 15.]])] >>> np.hsplit(x, np.array([3, 6])) [array([[ 0., 1., 2.], [ 4., 5., 6.], [ 8., 9., 10.], [12., 13., 14.]]), array([[ 3.], [ 7.], [11.], [15.]]), array([], shape=(4, 0), dtype=float64)] With a higher dimensional array the split is still along the second axis. >>> x = np.arange(8.0).reshape(2, 2, 2) >>> x array([[[0., 1.], [2., 3.]], [[4., 5.], [6., 7.]]]) >>> np.hsplit(x, 2) [array([[[0., 1.]], [[4., 5.]]]), array([[[2., 3.]], [[6., 7.]]])] With a 1-D array, the split is along axis 0. >>> x = np.array([0, 1, 2, 3, 4, 5]) >>> np.hsplit(x, 2) [array([0, 1, 2]), array([3, 4, 5])]
1,290
def get_points_earned(self): def get_returned_amount(): returned_amount = frappe.db.sql( , self.name, ) return abs(flt(returned_amount[0][0])) if returned_amount else 0 lp_details = get_loyalty_program_details_with_points( self.customer, company=self.company, loyalty_program=self.loyalty_program, expiry_date=self.posting_date, include_expired_entry=True, ) if ( lp_details and getdate(lp_details.from_date) <= getdate(self.posting_date) and (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date)) ): returned_amount = get_returned_amount() eligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount points_earned = cint(eligible_amount / lp_details.collection_factor) return points_earned or 0
select sum(grand_total) from `tabSales Invoice` where docstatus=1 and is_return=1 and ifnull(return_against, '')=%s
12
59
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def get_points_earned(self): def get_returned_amount(): returned_amount = frappe.db.sql( , self.name, ) return abs(flt(returned_amount[0][0])) if returned_amount else 0 lp_details = get_loyalty_program_details_with_points( self.customer, company=self.company, loyalty_program=self.loyalty_program, expiry_date=self.posting_date, include_expired_entry=True, ) if ( lp_details and getdate(lp_details.from_date) <= getdate(self.posting_date) and (not lp_details.to_date or getdate(lp_details.to_date) >= getdate(self.posting_date)) ): returned_amount = get_returned_amount() eligible_amount = flt(self.grand_total) - cint(self.loyalty_amount) - returned_amount points_earned = cint(eligible_amount / lp_details.collection_factor) return points_earned or 0 ``` ###Assistant : select sum(grand_total) from `tabSales Invoice` where docstatus=1 and is_return=1 and ifnull(return_against, '')=%s
1,291
def _get_kernel(self) -> plaidml.tile.Value: coords = np.arange(self._filter_size, dtype="float32") coords -= (self._filter_size - 1) / 2. kernel = np.square(coords) kernel *= -0.5 / np.square(self._filter_sigma) kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1)) kernel = K.constant(np.reshape(kernel, (1, -1))) kernel = K.softmax(kernel) kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1)) return kernel
Obtain the base kernel for performing depthwise convolution. Returns ------- :class:`plaidml.tile.Value` The gaussian kernel based on selected size and sigma
20
49
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _get_kernel(self) -> plaidml.tile.Value: coords = np.arange(self._filter_size, dtype="float32") coords -= (self._filter_size - 1) / 2. kernel = np.square(coords) kernel *= -0.5 / np.square(self._filter_sigma) kernel = np.reshape(kernel, (1, -1)) + np.reshape(kernel, (-1, 1)) kernel = K.constant(np.reshape(kernel, (1, -1))) kernel = K.softmax(kernel) kernel = K.reshape(kernel, (self._filter_size, self._filter_size, 1, 1)) return kernel ``` ###Assistant : Obtain the base kernel for performing depthwise convolution. Returns ------- :class:`plaidml.tile.Value` The gaussian kernel based on selected size and sigma
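A NumPy-only sketch of the same kernel construction, useful for sanity-checking the maths outside PlaidML; `size` and `sigma` stand in for `self._filter_size` and `self._filter_sigma` and are illustrative values.

```python
import numpy as np

size, sigma = 5, 1.5  # illustrative filter parameters

coords = np.arange(size, dtype="float32") - (size - 1) / 2.0
log_kernel = np.square(coords) * (-0.5 / sigma ** 2)
kernel_2d = log_kernel.reshape(1, -1) + log_kernel.reshape(-1, 1)

# The softmax over all taps is what normalises the Gaussian to sum to 1.
flat = np.exp(kernel_2d.reshape(-1))
kernel = (flat / flat.sum()).reshape(size, size, 1, 1)

assert np.isclose(kernel.sum(), 1.0)
print(kernel[:, :, 0, 0].round(4))
```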
1,292
def test_default_default(self): block = blocks.ListBlock(blocks.CharBlock(default='chocolate')) self.assertEqual(list(block.get_default()), ['chocolate']) block.set_name('test_shoppinglistblock') js_args = ListBlockAdapter().js_args(block) self.assertEqual(js_args[2], 'chocolate')
if no explicit 'default' is set on the ListBlock, it should fall back on a single instance of the child block in its default state.
25
13
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_default_default(self): block = blocks.ListBlock(blocks.CharBlock(default='chocolate')) self.assertEqual(list(block.get_default()), ['chocolate']) block.set_name('test_shoppinglistblock') js_args = ListBlockAdapter().js_args(block) self.assertEqual(js_args[2], 'chocolate') ``` ###Assistant : if no explicit 'default' is set on the ListBlock, it should fall back on a single instance of the child block in its default state.
1,293
def convert_empty_str_key(self) -> None: if self.namespaces and "" in self.namespaces.keys(): self.namespaces[None] = self.namespaces.pop("", "default")
Replace zero-length string in `namespaces`. This method will replace '' with None to align to `lxml` requirement that empty string prefixes are not allowed.
24
14
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def convert_empty_str_key(self) -> None: if self.namespaces and "" in self.namespaces.keys(): self.namespaces[None] = self.namespaces.pop("", "default") ``` ###Assistant : Replace zero-length string in `namespaces`. This method will replace '' with None to align to `lxml` requirement that empty string prefixes are not allowed.
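A standalone before/after sketch of the replacement the method above performs; the mapping contents and URLs are illustrative.

```python
# Illustrative namespaces mapping, as pandas' XML writer might receive it.
namespaces = {
    "": "http://example.com/ns",
    "xsi": "http://www.w3.org/2001/XMLSchema-instance",
}

# Same replacement the method performs: lxml rejects '' as a prefix,
# so the empty-string key is moved to None.
if namespaces and "" in namespaces.keys():
    namespaces[None] = namespaces.pop("", "default")

print(namespaces)
# {'xsi': 'http://www.w3.org/2001/XMLSchema-instance', None: 'http://example.com/ns'}
```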
1,294
def test_get_with_custom_key_using_default_key(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve_custom_key", args=(signature, self.image.id, "fill-800x600"), ) + "test.png" ) # Check response self.assertEqual(response.status_code, 403)
Test that the key can be changed on the view. This tests that the default key no longer works when the key is changed on the view

28
30
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_get_with_custom_key_using_default_key(self): # Generate signature signature = generate_signature(self.image.id, "fill-800x600") # Get the image response = self.client.get( reverse( "wagtailimages_serve_custom_key", args=(signature, self.image.id, "fill-800x600"), ) + "test.png" ) # Check response self.assertEqual(response.status_code, 403) ``` ###Assistant : Test that the key can be changed on the view. This tests that the default key no longer works when the key is changed on the view
1,295
async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client): started = anyio.Event()
Since timeouts capture anyio cancellations, we want to ensure that something still ends up in a 'Crashed' state if it is cancelled independently from our timeout cancellation.
27
8
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_timeouts_do_not_hide_crashes(self, flow_run, orion_client): started = anyio.Event() ``` ###Assistant : Since timeouts capture anyio cancellations, we want to ensure that something still ends up in a 'Crashed' state if it is cancelled independently from our timeout cancellation.
1,296
def test_enable_disable_conflict_with_config(): nlp = English() nlp.add_pipe("tagger") nlp.add_pipe("senter") nlp.add_pipe("sentencizer") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) # Expected to fail, as config and arguments conflict. with pytest.raises(ValueError): spacy.load( tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} ) # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=["tagger"]) # Expected to succeed with a warning, as disable=[] should override the config setting. with pytest.warns(UserWarning): spacy.load( tmp_dir, enable=["tagger"], disable=[], config={"nlp": {"disabled": ["senter"]}}, )
Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.
10
72
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def test_enable_disable_conflict_with_config(): nlp = English() nlp.add_pipe("tagger") nlp.add_pipe("senter") nlp.add_pipe("sentencizer") with make_tempdir() as tmp_dir: nlp.to_disk(tmp_dir) # Expected to fail, as config and arguments conflict. with pytest.raises(ValueError): spacy.load( tmp_dir, enable=["tagger"], config={"nlp": {"disabled": ["senter"]}} ) # Expected to succeed without warning due to the lack of a conflicting config option. spacy.load(tmp_dir, enable=["tagger"]) # Expected to succeed with a warning, as disable=[] should override the config setting. with pytest.warns(UserWarning): spacy.load( tmp_dir, enable=["tagger"], disable=[], config={"nlp": {"disabled": ["senter"]}}, ) ``` ###Assistant : Test conflict between enable/disable w.r.t. `nlp.disabled` set in the config.
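For context, a minimal load call of the kind the test above exercises; the pipeline name is illustrative and the `enable=` argument requires spaCy v3.4+.

```python
import spacy

# Load with only the tagger enabled; per the test above, passing an explicit
# disable=[] overrides a conflicting nlp.disabled value from the config.
nlp = spacy.load("en_core_web_sm", enable=["tagger"], disable=[])
print(nlp.pipe_names)  # only the enabled components remain active
```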
1,297
async def test_logs_streaming(job_manager): stream_logs_script = stream_logs_cmd = f'python -c "{stream_logs_script}"' job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd) await async_wait_for_condition( lambda: "STREAMED" in job_manager.get_job_logs(job_id) ) job_manager.stop_job(job_id) @pytest.mark.asyncio
Test that logs are streamed during the job, not just at the end. import time print('STREAMED') while True: time.sleep(1)
19
23
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python async def test_logs_streaming(job_manager): stream_logs_script = stream_logs_cmd = f'python -c "{stream_logs_script}"' job_id = await job_manager.submit_job(entrypoint=stream_logs_cmd) await async_wait_for_condition( lambda: "STREAMED" in job_manager.get_job_logs(job_id) ) job_manager.stop_job(job_id) @pytest.mark.asyncio ``` ###Assistant : Test that logs are streamed during the job, not just at the end. import time print('STREAMED') while True: time.sleep(1)
1,298
def _train_no_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) model = _get_big_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) optimizer = optimizers.SGD() losses = [] tr_vars = model.trainable_variables for _ in range(n_steps): with tf.GradientTape() as tape: logits = model(x) loss = _compute_loss(logits, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses
Trains a single large model without gradient checkpointing.
8
60
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _train_no_recompute(n_steps): img_dim, n_channels, batch_size = 256, 1, 4 x, y = _get_dummy_data(img_dim, n_channels, batch_size) model = _get_big_cnn_model( img_dim, n_channels, num_partitions=3, blocks_per_partition=2 ) optimizer = optimizers.SGD() losses = [] tr_vars = model.trainable_variables for _ in range(n_steps): with tf.GradientTape() as tape: logits = model(x) loss = _compute_loss(logits, y) losses.append(loss) grads = tape.gradient(loss, tr_vars) # tr_vars optimizer.apply_gradients(zip(grads, tr_vars)) del grads return losses ``` ###Assistant : Trains a single large model without gradient checkpointing.
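The docstring contrasts this with gradient checkpointing; below is a hedged, self-contained sketch of the recompute counterpart using `tf.recompute_grad`, which re-runs the wrapped block in the backward pass to trade compute for activation memory (layer sizes are illustrative).

```python
import tensorflow as tf

block = tf.keras.Sequential(
    [tf.keras.layers.Dense(256, activation="relu") for _ in range(3)]
)
block.build((None, 256))

# Wrap the block so its activations are recomputed during backprop
# instead of being kept alive for the whole forward pass.
recomputed_block = tf.recompute_grad(lambda t: block(t))

x = tf.random.normal((4, 256))
with tf.GradientTape() as tape:
    loss = tf.reduce_sum(recomputed_block(x))
grads = tape.gradient(loss, block.trainable_variables)
print([g.shape for g in grads])
```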
1,299
def _make_replica_execution_function(model, mode): if mode == ModeKeys.TRAIN: func = model.train_on_batch elif mode == ModeKeys.TEST: func = model.test_on_batch else:
A single step of the distributed execution on a replica.
10
18
Python
###User : Below is a Python method which does a task. Create a documentation for the below code : ```Python def _make_replica_execution_function(model, mode): if mode == ModeKeys.TRAIN: func = model.train_on_batch elif mode == ModeKeys.TEST: func = model.test_on_batch else: ``` ###Assistant : A single step of the distributed execution on a replica.