import warnings
from typing import Optional

import torch
from torch import Tensor

# Stand-in for the DType alias used in torch.nn.functional.
DType = torch.dtype


def _canonical_mask(
    mask: Optional[Tensor],
    mask_name: str,
    other_type: Optional[DType],
    other_name: str,
    target_type: DType,
    check_other: bool = True,
) -> Optional[Tensor]:
    # Normalize an attention/padding mask: validate its dtype and, if it is a
    # bool mask, convert it to a float mask of `target_type` in which masked
    # (True) positions hold -inf and unmasked positions hold 0.
    if mask is not None:
        _mask_dtype = mask.dtype
        _mask_is_float = torch.is_floating_point(mask)
        if _mask_dtype != torch.bool and not _mask_is_float:
            raise AssertionError(
                f"only bool and floating types of {mask_name} are supported"
            )
        if check_other and other_type is not None:
            if _mask_dtype != other_type:
                warnings.warn(
                    f"Support for mismatched {mask_name} and {other_name} "
                    "is deprecated. Use same type for both instead."
                )
        if not _mask_is_float:
            mask = torch.zeros_like(mask, dtype=target_type).masked_fill_(
                mask, float("-inf")
            )
    return mask
View on GitHub: GitHub link
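A minimal usage sketch of the function above. The tensor shapes, the mask values, and the argument names passed here are illustrative assumptions, not the exact way torch.nn.functional calls this helper internally: a bool key-padding mask is turned into an additive float mask where True positions become -inf.

# Minimal sketch: convert a bool padding mask to an additive float mask.
# Uses the _canonical_mask definition above; values are made up for illustration.
key_padding_mask = torch.tensor([[False, False, True],
                                 [False, True, True]])  # True = padded position

float_mask = _canonical_mask(
    mask=key_padding_mask,
    mask_name="key_padding_mask",
    other_type=None,           # no second mask dtype to check against
    other_name="attn_mask",
    target_type=torch.float32,
)
# float_mask:
# tensor([[0., 0., -inf],
#         [0., -inf, -inf]])
# Adding this to raw attention scores drives padded keys to zero weight
# after softmax, which is exactly why the bool mask is canonicalized
# into this additive float form.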