diff --git a/docs/html/.buildinfo b/docs/html/.buildinfo new file mode 100644 index 0000000..c579169 --- /dev/null +++ b/docs/html/.buildinfo @@ -0,0 +1,4 @@ +# Sphinx build info version 1 +# This file records the configuration used when building these files. When it is not found, a full rebuild will be done. +config: c457bdf30d2a3c56394067910656aaac +tags: 645f666f9bcd5a90fca523b33c5a78b7 diff --git a/docs/html/_modules/config.html b/docs/html/_modules/config.html new file mode 100644 index 0000000..cc0b44d --- /dev/null +++ b/docs/html/_modules/config.html @@ -0,0 +1,166 @@ + + + + + + + config — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for config

+
+import os
+import sys
+from utils import check_path, AttributeCopier, create_cls
+import subprocess
+
+
+[文档]
+class Config(AttributeCopier):
+    """Parse model parameters from an instantiated Netrans and generate an
+    inputmeta configuration template with pnnacc.
+
+    Args:
+        source_obj (Netrans): instantiated Netrans object carrying the model
+            information and the Netrans information
+    """
+    def __init__(self, source_obj) -> None:
+        super().__init__(source_obj)
+
+    @check_path
+    def inputmeta_gen(self):
+        """Generate the inputmeta configuration template.
+
+        Returns:
+            None
+        """
+        netrans_path = self.netrans
+        network_name = self.model_name
+
+        # run the pnnacc "generate inputmeta" command
+        cmd = f"{netrans_path} generate inputmeta --model {network_name}.json --separated-database"
+        try:
+            result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+        except Exception as exc:
+            raise RuntimeError('config failed') from exc
+        if result.returncode != 0:
+            raise RuntimeError(f'config failed: {result.stderr}')
+ + # os.chdir("..") + +# def main(): + +# # 检查命令行参数数量是否正确 +# if len(sys.argv) != 2: +# print("Enter a network name!") +# sys.exit(2) + +# # 检查提供的目录是否存在 +# network_name = sys.argv[1] +# # 构建 netrans 可执行文件的路径 +# netrans_path =os.getenv('NETRANS_PATH') +# cla = create_cls(netrans_path, network_name) +# func = InputmetaGen(cla) +# func.inputmeta_gen() + + +# if __name__ == '__main__': +# main() +
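+
+A minimal usage sketch (not part of the module itself; the names are
+illustrative and NETRANS_PATH is assumed to be set):
+
+```py3
+import os
+from utils import create_cls
+from config import Config
+
+# "lenet" stands for a project directory that already contains
+# lenet.json / lenet.data from the import step.
+cla = create_cls(os.environ['NETRANS_PATH'], 'lenet')
+Config(cla).inputmeta_gen()   # writes the lenet_inputmeta.yml template
+```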
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/example.html b/docs/html/_modules/example.html new file mode 100644 index 0000000..88bd4d4 --- /dev/null +++ b/docs/html/_modules/example.html @@ -0,0 +1,173 @@ + + + + + + + example — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for example

+#!/usr/bin/env python3
+
+import argparse
+from netrans import Netrans
+
+
+[文档] +def main(): + # 创建参数解析器 + parser = argparse.ArgumentParser( + description='神经网络模型转换工具', + formatter_class=argparse.ArgumentDefaultsHelpFormatter # 自动显示默认值 + ) + + # 必填位置参数 + parser.add_argument( + 'model_path', + type=str, + help='输入模型路径(必须参数)' + ) + + # 可选参数组 + quant_group = parser.add_argument_group('量化参数') + quant_group.add_argument( + '-q', '--quantize_type', + type=str, + choices=['uint8', 'int8', 'int16', 'float'], + default='uint8', + metavar='TYPE', + help='量化类型(可选值:%(choices)s)' + ) + quant_group.add_argument( + '-m', '--mean', + type=int, + default=0, + help='归一化均值(默认:%(default)s)' + ) + quant_group.add_argument( + '-s', '--scale', + type=float, + default=1.0, + help='量化缩放系数(默认:%(default)s)' + ) + parser.add_argument( + '-p', '--profile', + action='store_true', # 设置为True当参数存在时 + help='启用性能分析模式(默认:%(default)s)' + ) + + + # 解析参数 + args = parser.parse_args() + + # 执行模型转换 + try: + model = Netrans(model_path=args.model_path) + model.model2nbg( + quantize_type=args.quantize_type, + mean=args.mean, + scale=args.scale, + profile=args.profile + ) + print(f"模型 {args.model_path} 转换成功") + except FileNotFoundError: + print(f"错误:模型文件 {args.model_path} 不存在") + exit(1)
+ + +if __name__ == "__main__": + main() +
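+
+The same conversion can be driven without argparse; a minimal sketch using
+only calls that appear in main() above (the model path is illustrative):
+
+```py3
+from netrans import Netrans
+
+# Equivalent to: example.py yolov5s -q uint8 -m 0 -s 0.003921568627
+model = Netrans(model_path='yolov5s')
+model.model2nbg(quantize_type='uint8', mean=0, scale=0.003921568627, profile=False)
+```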
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/export.html b/docs/html/_modules/export.html new file mode 100644 index 0000000..a854930 --- /dev/null +++ b/docs/html/_modules/export.html @@ -0,0 +1,285 @@ + + + + + + + export — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for export

+import os
+import sys
+import subprocess
+import shutil
+from utils import check_path, AttributeCopier, create_cls
+# NETRANS_PATH is checked by utils.check_netrans via the @check_path decorator
+
+# path of the dataset list file
+dataset = 'dataset.txt'
+
+
+[文档] +class Export(AttributeCopier): + """从实例化的 Netrans 中解析模型参数,并基于 pnnacc 导出模型ngb文件 + + Args: + Netrans (class): 实例化的Netrans类,包含 模型信息 和 Netrans 信息 + """ + def __init__(self, source_obj) -> None: + """从实例化的 Netrans 中解析模型参数 + + Args: + source_obj (class): 实例化的Netrans类,包含 模型信息 和 Netrans 信息 + + """ + super().__init__(source_obj) + + @check_path + def export_network(self): + """基于 pnnacc 导出模型 + """ + + netrans = self.netrans + quantized = self.quantize_type + name = self.model_name + netrans_path = self.netrans_path + + ovxgenerator = netrans + " export ovxlib" + # 进入模型目录 + # os.chdir(name) + + # 根据量化类型设置参数 + if quantized == 'float': + type_ = 'float' + quantization_type = 'none_quantized' + generate_path = './wksp/none_quantized' + elif quantized == 'uint8': + type_ = 'quantized' + quantization_type = 'asymmetric_affine' + generate_path = './wksp/asymmetric_affine' + elif quantized == 'int8': + type_ = 'quantized' + quantization_type = 'dynamic_fixed_point-8' + generate_path = './wksp/dynamic_fixed_point-8' + elif quantized == 'int16': + type_ = 'quantized' + quantization_type = 'dynamic_fixed_point-16' + generate_path = './wksp/dynamic_fixed_point-16' + else: + print("=========== wrong quantization_type ! ( float / uint8 / int8 / int16 )===========") + sys.exit(1) + + # 创建输出目录 + os.makedirs(generate_path, exist_ok=True) + + # 构建命令 + if quantized == 'float': + cmd = f"{ovxgenerator} \ + --model {name}.json \ + --model-data {name}.data \ + --dtype {type_} \ + --pack-nbg-viplite \ + --optimize 'VIP8000NANOQI_PLUS_PID0XB1'\ + --target-ide-project 'linux64' \ + --viv-sdk {netrans_path}/pnna_sdk \ + --output-path {generate_path}/{name}_{quantization_type}" + else: + if not os.path.exists(f"{name}_{quantization_type}.quantize"): + print(f"\033[31m Can not find {name}_{quantization_type}.quantize \033[0m") + sys.exit(1) + else : + if not os.path.exists(f"{name}_postprocess_file.yml"): + cmd = f"{ovxgenerator} \ + --model {name}.json \ + --model-data {name}.data \ + --dtype {type_} \ + --pack-nbg-viplite \ + --optimize 'VIP8000NANOQI_PLUS_PID0XB1'\ + --viv-sdk {netrans_path}/pnna_sdk \ + --model-quantize {name}_{quantization_type}.quantize \ + --with-input-meta {name}_inputmeta.yml \ + --target-ide-project 'linux64' \ + --output-path {generate_path}/{quantization_type}" + else: + cmd = f"{ovxgenerator} \ + --model {name}.json \ + --model-data {name}.data \ + --dtype {type_} \ + --pack-nbg-viplite \ + --optimize 'VIP8000NANOQI_PLUS_PID0XB1'\ + --viv-sdk {netrans_path}/pnna_sdk \ + --model-quantize {name}_{quantization_type}.quantize \ + --with-input-meta {name}_inputmeta.yml \ + --target-ide-project 'linux64' \ + --postprocess-file {name}_postprocess_file.yml \ + --output-path {generate_path}/{quantization_type}" + + result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + + # 检查执行结果 + if result.returncode == 0: + print("\033[31m SUCCESS \033[0m") + else: + print(f"\033[31m ERROR ! 
{result.stderr} \033[0m")
+
+        source_dir = f"{generate_path}_nbg_viplite"
+        target_dir = generate_path
+        src_ngb = f"{source_dir}/network_binary.nb"
+        if self.profile:
+            try:
+                # if the target path already exists, remove it first so the move succeeds
+                if os.path.exists(target_dir):
+                    shutil.rmtree(target_dir)
+                # move the whole directory into place
+                shutil.move(source_dir, target_dir)
+            except Exception as e:
+                print(f"Error moving directory: {e}")
+                sys.exit(1)  # non-zero exit code signals an error
+        else:
+            try:
+                # copy only the network_binary.nb file
+                shutil.rmtree(generate_path)
+                os.mkdir(generate_path)
+                shutil.copy(src_ngb, generate_path)
+            except FileNotFoundError:
+                print(f"Error: {src_ngb} is not found")
+            except Exception as e:
+                print(f"Error occurred: {e}")
+
+            try:
+                # clean up the source directory
+                shutil.rmtree(source_dir)
+            except Exception as e:
+                print(f"Error removing directory: {e}")
+                sys.exit(1)  # non-zero exit code signals an error
+ + +
+[文档]
+def main():
+    # check the number of command-line arguments
+    if len(sys.argv) < 3:
+        print("Input a network name and quantized type ( float / uint8 / int8 / int16 )")
+        sys.exit(1)
+    # check that the network directory exists
+    network_name = sys.argv[1]
+    if not os.path.exists(network_name):
+        print(f"Directory {network_name} does not exist !")
+        sys.exit(2)
+
+    netrans_path = os.environ['NETRANS_PATH']
+    # run the export
+    cla = create_cls(netrans_path, network_name, sys.argv[2])
+    func = Export(cla)
+    func.export_network()
+ + + # export_network(netrans, network_name, sys.argv[2]) + + +if __name__ == '__main__': + main() +
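+
+For completeness, an API-level sketch of the same flow (names are
+illustrative; the project must already be imported and quantized):
+
+```py3
+import os
+from utils import create_cls
+from export import Export
+
+# Illustrative: export a uint8-quantized "yolov5s" project.  Expects
+# yolov5s_asymmetric_affine.quantize and yolov5s_inputmeta.yml to exist;
+# the result lands in wksp/asymmetric_affine/network_binary.nb.
+cla = create_cls(os.environ['NETRANS_PATH'], 'yolov5s', 'uint8')
+Export(cla).export_network()
+```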
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/import_model.html b/docs/html/_modules/import_model.html new file mode 100644 index 0000000..1f34baa --- /dev/null +++ b/docs/html/_modules/import_model.html @@ -0,0 +1,443 @@ + + + + + + + import_model — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for import_model

+import os 
+import sys
+import subprocess
+from utils import check_path, AttributeCopier, create_cls
+
+
+[文档]
+def check_status(result):
+    """Report how a command execution went.
+
+    Args:
+        result: return value of subprocess.run
+    """
+    if result.returncode == 0:
+        print("\033[32m LOAD MODEL SUCCESS \033[0m")
+    else:
+        print(f"\033[31m ERROR: {result.stderr} \033[0m")
+ + + +
+[文档] +def import_caffe_network(name, netrans_path): + """导入 caffe 模型 + + Args: + name (str): 模型名字 + netrans_path (str): 模型路径 + + Returns: + cmd (str): 生成的pnnacc 命令行, 被subprocesses执行 + """ + # 定义转换工具的路径 + convert_caffe =netrans_path + " import caffe" + + # 定义模型文件路径 + model_json_path = f"{name}.json" + model_data_path = f"{name}.data" + model_prototxt_path = f"{name}.prototxt" + model_caffemodel_path = f"{name}.caffemodel" + + # 打印转换信息 + print(f"=========== Converting {name} Caffe model ===========") + + # 构建转换命令 + if os.path.isfile(model_caffemodel_path): + cmd = f"{convert_caffe} \ + --model {model_prototxt_path} \ + --weights {model_caffemodel_path} \ + --output-model {model_json_path} \ + --output-data {model_data_path}" + else: + print("=========== fake Caffe model data file =============") + cmd = f"{convert_caffe} \ + --model {model_prototxt_path} \ + --output-model {model_json_path} \ + --output-data {model_data_path}" + + # 执行转换命令 + # print(cmd) + # os.system(cmd) + return cmd
+ + +
+[文档] +def import_tensorflow_network(name, netrans_path): + """导入 tensorflow 模型 + + Args: + name (str): 模型名字 + netrans_path (str): 模型路径 + + Returns: + cmd (str): 生成的pnnacc 命令行, 被subprocesses执行 + """ + # 定义转换工具的命令 + convertf_cmd = f"{netrans_path} import tensorflow" + + # 打印转换信息 + print(f"=========== Converting {name} Tensorflow model ===========") + + # 读取 inputs_outputs.txt 文件中的参数 + with open('inputs_outputs.txt', 'r') as f: + inputs_outputs_params = f.read().strip() + + # 构建转换命令 + cmd = f"{convertf_cmd} \ + --model {name}.pb \ + --output-data {name}.data \ + --output-model {name}.json \ + {inputs_outputs_params}" + + # 执行转换命令 + # print(cmd) + return cmd
+ + + # result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + + # 检查执行结果 + # check_status(result) + +
+[文档] +def import_onnx_network(name, netrans_path): + """导入 onnx 模型 + + Args: + name (str): 模型名字 + netrans_path (str): 模型路径 + + Returns: + cmd (str): 生成的pnnacc 命令行, 被subprocesses执行 + """ + # 定义转换工具的命令 + convert_onnx_cmd = f"{netrans_path} import onnx" + + # 打印转换信息 + print(f"=========== Converting {name} ONNX model ===========") + if os.path.exists(f"{name}_outputs.txt"): + output_path = os.path.join(os.getcwd(), name+"_outputs.txt") + with open(output_path, 'r', encoding='utf-8') as file: + outputs = str(file.readline().strip()) + + cmd = f"{convert_onnx_cmd} \ + --model {name}.onnx \ + --output-model {name}.json \ + --output-data {name}.data \ + --outputs '{outputs}'" + else: + # 构建转换命令 + cmd = f"{convert_onnx_cmd} \ + --model {name}.onnx \ + --output-model {name}.json \ + --output-data {name}.data" + + # 执行转换命令 + # print(cmd) + return cmd
+ + + # result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + + # 检查执行结果 + # check_status(result) + +####### TFLITE +
+[文档] +def import_tflite_network(name, netrans_path): + """导入 tflite 模型 + + Args: + name (str): 模型名字 + netrans_path (str): 模型路径 + + Returns: + cmd (str): 生成的pnnacc 命令行, 被subprocesses执行 + """ + # 定义转换工具的路径或命令 + convert_tflite = f"{netrans_path} import tflite" + + # 定义模型文件路径 + model_json_path = f"{name}.json" + model_data_path = f"{name}.data" + model_tflite_path = f"{name}.tflite" + + # 打印转换信息 + print(f"=========== Converting {name} TFLite model ===========") + + # 构建转换命令 + cmd = f"{convert_tflite} \ + --model {model_tflite_path} \ + --output-model {model_json_path} \ + --output-data {model_data_path}" + + # 执行转换命令 + # print(cmd) + return cmd
+ + + # result = subprocess.run(cmd, shell=True, capture_output=True, text=True) + + # 检查执行结果 + # check_status(result) + + +
+[文档]
+def import_darknet_network(name, netrans_path):
+    """Import a darknet model.
+
+    Args:
+        name (str): model name
+        netrans_path (str): path to the pnnacc executable
+
+    Returns:
+        cmd (str): generated pnnacc command line, executed by subprocess
+    """
+    # converter command
+    convert_darknet_cmd = f"{netrans_path} import darknet"
+
+    print(f"=========== Converting {name} darknet model ===========")
+
+    # build the conversion command
+    cmd = f"{convert_darknet_cmd} \
+        --model {name}.cfg \
+        --weight {name}.weights \
+        --output-model {name}.json \
+        --output-data {name}.data"
+
+    # the caller runs the command and checks its status
+    return cmd
+ + +
+[文档]
+def import_pytorch_network(name, netrans_path):
+    """Import a pytorch model.
+
+    Args:
+        name (str): model name
+        netrans_path (str): path to the pnnacc executable
+
+    Returns:
+        cmd (str): generated pnnacc command line, executed by subprocess
+    """
+    # converter command
+    convert_pytorch_cmd = f"{netrans_path} import pytorch"
+
+    print(f"=========== Converting {name} pytorch model ===========")
+
+    # read the input-size parameters from input_size.txt
+    try:
+        with open('input_size.txt', 'r') as file:
+            input_size_params = ' '.join(file.readlines())
+    except FileNotFoundError:
+        print("Error: input_size.txt not found.")
+        sys.exit(1)
+
+    # build the conversion command
+    cmd = f"{convert_pytorch_cmd} \
+        --model {name}.pt \
+        --output-model {name}.json \
+        --output-data {name}.data \
+        {input_size_params}"
+
+    # the caller runs the command and checks its status
+    return cmd
+ + +# 使用示例 +# import_tensorflow_network('model_name', '/path/to/NETRANS_PATH') +
+[文档]
+class ImportModel(AttributeCopier):
+    """Parse model parameters from an instantiated Netrans and import the
+    model with pnnacc.
+
+    Args:
+        source_obj (Netrans): instantiated Netrans object carrying the model
+            information and the Netrans information
+    """
+    def __init__(self, source_obj) -> None:
+        super().__init__(source_obj)
+
+    @check_path
+    def import_network(self):
+        """Import the model with pnnacc.
+
+        Raises:
+            FileNotFoundError: if no suitable model file is found
+            RuntimeError: if the import command fails to run
+        """
+        if self.verbose is True:
+            print("begin load model")
+            print(os.getcwd())
+            print(f"{self.model_name}.weights")
+        name = self.model_name
+        netrans_path = self.netrans
+        if os.path.isfile(f"{name}.prototxt"):
+            cmd = import_caffe_network(name, netrans_path)
+        elif os.path.isfile(f"{name}.pb"):
+            cmd = import_tensorflow_network(name, netrans_path)
+        elif os.path.isfile(f"{name}.onnx"):
+            cmd = import_onnx_network(name, netrans_path)
+        elif os.path.isfile(f"{name}.tflite"):
+            cmd = import_tflite_network(name, netrans_path)
+        elif os.path.isfile(f"{name}.weights"):
+            cmd = import_darknet_network(name, netrans_path)
+        elif os.path.isfile(f"{name}.pt"):
+            cmd = import_pytorch_network(name, netrans_path)
+        else:
+            raise FileNotFoundError("Can not find suitable model files")
+        try:
+            result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+        except Exception as exc:
+            raise RuntimeError("load model failed") from exc
+        # report the result
+        check_status(result)
+ + # os.chdir("..") + + +# def main(): +# if len(sys.argv) != 2 : +# print("Input a network") +# sys.exit(-1) + +# network_name = sys.argv[1] +# # check_env(network_name) + +# netrans_path = os.environ['NETRANS_PATH'] +# # netrans = os.path.join(netrans_path, 'pnnacc') +# clas = create_cls(netrans_path, network_name,verbose=False) +# func = ImportModel(clas) +# func.import_network() +# if __name__ == "__main__": +# main() +
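+
+A minimal usage sketch mirroring the commented-out main() above (names are
+illustrative; NETRANS_PATH is assumed to be set):
+
+```py3
+import os
+from utils import create_cls
+from import_model import ImportModel
+
+# Illustrative: "lenet" is a project directory holding one of
+# lenet.prototxt/.pb/.onnx/.tflite/.weights/.pt -- the file suffix
+# decides which importer builds the pnnacc command.
+cla = create_cls(os.environ['NETRANS_PATH'], 'lenet', verbose=False)
+ImportModel(cla).import_network()   # emits lenet.json and lenet.data
+```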
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/index.html b/docs/html/_modules/index.html new file mode 100644 index 0000000..699f12d --- /dev/null +++ b/docs/html/_modules/index.html @@ -0,0 +1,109 @@ + + + + + + + 概览:模块代码 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

All modules for which code is available

+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/infer.html b/docs/html/_modules/infer.html new file mode 100644 index 0000000..54f85c1 --- /dev/null +++ b/docs/html/_modules/infer.html @@ -0,0 +1,204 @@ + + + + + + + infer — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for infer

+import os
+import sys
+import subprocess
+from utils import check_path, AttributeCopier, create_cls
+
+
+[文档]
+class Infer(AttributeCopier):
+    """Parse model parameters from an instantiated Netrans and run
+    inference with pnnacc.
+
+    Args:
+        source_obj (Netrans): instantiated Netrans object carrying the model
+            information and the Netrans information
+    """
+    def __init__(self, source_obj) -> None:
+        super().__init__(source_obj)
+
+    @check_path
+    def inference_network(self):
+        """Run inference on the (quantized) model with pnnacc."""
+        netrans = self.netrans
+        quantized = self.quantize_type
+        name = self.model_name
+
+        netrans += " inference"
+
+        # map the quantize type to dtype and quantization name
+        if quantized == 'float':
+            type_ = 'float32'
+            quantization_type = 'float32'
+        elif quantized == 'uint8':
+            quantization_type = 'asymmetric_affine'
+            type_ = 'quantized'
+        elif quantized == 'int8':
+            quantization_type = 'dynamic_fixed_point-8'
+            type_ = 'quantized'
+        elif quantized == 'int16':
+            quantization_type = 'dynamic_fixed_point-16'
+            type_ = 'quantized'
+        else:
+            print("=========== wrong quantization_type ! ( float / uint8 / int8 / int16 )===========")
+            sys.exit(-1)
+
+        # build the inference command
+        inf_path = './inf'
+        cmd = f"{netrans} \
+            --dtype {type_} \
+            --batch-size 1 \
+            --model-quantize {name}_{quantization_type}.quantize \
+            --model {name}.json \
+            --model-data {name}.data \
+            --output-dir {inf_path} \
+            --with-input-meta {name}_inputmeta.yml \
+            --device CPU"
+
+        # run the inference command
+        if self.verbose is True:
+            print(cmd)
+        result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
+
+        # report the result
+        if result.returncode == 0:
+            print("\033[32m SUCCESS \033[0m")
+        else:
+            print(f"\033[31m ERROR: {result.stderr} \033[0m")
+ + + # 返回原始目录 + +
+[文档]
+def main():
+    # check the number of command-line arguments
+    if len(sys.argv) < 3:
+        print("Input a network name and quantized type ( float / uint8 / int8 / int16 )")
+        sys.exit(-1)
+
+    # check that the network directory exists
+    network_name = sys.argv[1]
+    if not os.path.exists(network_name):
+        print(f"Directory {network_name} does not exist !")
+        sys.exit(-2)
+
+    netrans_path = os.environ['NETRANS_PATH']
+    quantize_type = sys.argv[2]
+    cla = create_cls(netrans_path, network_name, quantize_type, False)
+
+    # run inference
+    func = Infer(cla)
+    func.inference_network()
+ + + # 定义数据集文件路径 + # dataset_path = './dataset.txt' + # 调用推理函数 + # inference_network(network_name, sys.argv[2]) + +if __name__ == '__main__': + # print("main") + main() +
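+
+An API-level sketch of the same flow, assuming the quantize step has already
+produced its .quantize file (names are illustrative):
+
+```py3
+import os
+from utils import create_cls
+from infer import Infer
+
+# Illustrative: run CPU inference on a uint8-quantized "lenet" project;
+# expects lenet_asymmetric_affine.quantize and lenet_inputmeta.yml,
+# outputs are written to ./inf inside the project directory.
+cla = create_cls(os.environ['NETRANS_PATH'], 'lenet', 'uint8')
+Infer(cla).inference_network()
+```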
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/quantize.html b/docs/html/_modules/quantize.html new file mode 100644 index 0000000..5103bbc --- /dev/null +++ b/docs/html/_modules/quantize.html @@ -0,0 +1,211 @@ + + + + + + + quantize — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for quantize

+import os
+import sys
+from utils import check_path, AttributeCopier, create_cls
+
+
+[文档]
+class Quantize(AttributeCopier):
+    """Parse the Netrans parameters and quantize the model with pnnacc.
+
+    Args:
+        source_obj (Netrans): instantiated Netrans object whose attributes
+            are copied
+    """
+    def __init__(self, source_obj) -> None:
+        super().__init__(source_obj)
+
+    @check_path
+    def quantize_network(self):
+        """Quantize the model with pnnacc."""
+        netrans = self.netrans
+        quantized_type = self.quantize_type
+        name = self.model_name
+        netrans += " quantize"
+        # map the quantize type to the quantizer name
+        if quantized_type == 'float':
+            print("=========== do not need quantized===========")
+            return
+        elif quantized_type == 'uint8':
+            quantization_type = "asymmetric_affine"
+        elif quantized_type == 'int8':
+            quantization_type = "dynamic_fixed_point-8"
+        elif quantized_type == 'int16':
+            quantization_type = "dynamic_fixed_point-16"
+        else:
+            print("=========== wrong quantization_type ! ( uint8 / int8 / int16 )===========")
+            return
+
+        # announce the quantization run
+        print(" =======================================================================")
+        print(f" ==== Start Quantizing {name} model with type of {quantization_type} ===")
+        print(" =======================================================================")
+        current_directory = os.getcwd()
+        txt_path = current_directory + "/dataset.txt"
+        with open(txt_path, 'r', encoding='utf-8') as file:
+            num_lines = len(file.readlines())
+
+        # remove a stale quantize file if present
+        quantize_file = f"{name}_{quantization_type}.quantize"
+        if os.path.exists(quantize_file):
+            print(f"\033[31m rm {quantize_file} \033[0m")
+            os.remove(quantize_file)
+
+        # build and run the quantize command
+        cmd = f"{netrans} \
+            --batch-size 1 \
+            --qtype {quantized_type} \
+            --rebuild \
+            --quantizer {quantization_type.split('-')[0]} \
+            --model-quantize {quantize_file} \
+            --model {name}.json \
+            --model-data {name}.data \
+            --with-input-meta {name}_inputmeta.yml \
+            --device CPU \
+            --algorithm kl_divergence \
+            --iterations {num_lines}"
+
+        os.system(cmd)
+
+        # check the quantization result
+        if os.path.exists(quantize_file):
+            print("\033[32m QUANTIZED SUCCESS \033[0m")
+        else:
+            print("\033[31m ERROR ! \033[0m")
+ + + +# def main(): +# # 检查命令行参数数量 +# if len(sys.argv) < 3: +# print("Input a network name and quantized type ( uint8 / int8 / int16 )") +# sys.exit(-1) + +# # 检查网络目录是否存在 +# network_name = sys.argv[1] + +# # 定义 netrans 路径 +# # netrans = os.path.join(os.environ['NETRANS_PATH'], 'pnnacc') +# # network_name = sys.argv[1] +# # check_env(network_name) + +# netrans_path = os.environ['NETRANS_PATH'] +# # netrans = os.path.join(netrans_path, 'pnnacc') +# quantize_type = sys.argv[2] +# cla = create_cls(netrans_path, network_name,quantize_type) + +# # 调用量化函数 +# run = Quantize(cla) +# run.quantize_network() + +# if __name__ == "__main__": +# main() +
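+
+A minimal sketch of driving Quantize directly, mirroring the commented-out
+main() above (illustrative names; dataset.txt must list the calibration
+images and the config step must already have produced the inputmeta file):
+
+```py3
+import os
+from utils import create_cls
+from quantize import Quantize
+
+# Illustrative: produce lenet_asymmetric_affine.quantize for a "lenet"
+# project directory containing dataset.txt and lenet_inputmeta.yml.
+cla = create_cls(os.environ['NETRANS_PATH'], 'lenet', 'uint8')
+Quantize(cla).quantize_network()
+```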
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/quantize_hb.html b/docs/html/_modules/quantize_hb.html new file mode 100644 index 0000000..ae2c702 --- /dev/null +++ b/docs/html/_modules/quantize_hb.html @@ -0,0 +1,200 @@ + + + + + + + quantize_hb — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for quantize_hb

+import os
+import sys
+from utils import check_path, AttributeCopier, create_cls
+
+
+[文档]
+class Quantize(AttributeCopier):
+    """Hybrid-quantization variant of quantize.Quantize: parses the Netrans
+    parameters and quantizes the model with pnnacc in hybrid mode.
+
+    Args:
+        source_obj (Netrans): instantiated Netrans object whose attributes
+            are copied
+    """
+    def __init__(self, source_obj) -> None:
+        super().__init__(source_obj)
+
+    @check_path
+    def quantize_network(self):
+        """Quantize the model with pnnacc (hybrid mode)."""
+        netrans = self.netrans
+        quantized_type = self.quantize_type
+        name = self.model_name
+        netrans += " quantize"
+        # map the quantize type to the quantizer name
+        if quantized_type == 'float':
+            print("=========== do not need quantized===========")
+            return
+        elif quantized_type == 'uint8':
+            quantization_type = "asymmetric_affine"
+        elif quantized_type == 'int8':
+            quantization_type = "dynamic_fixed_point-8"
+        elif quantized_type == 'int16':
+            quantization_type = "dynamic_fixed_point-16"
+        else:
+            print("=========== wrong quantization_type ! ( uint8 / int8 / int16 )===========")
+            return
+
+        # announce the quantization run
+        print(" =======================================================================")
+        print(f" ==== Start Quantizing {name} model with type of {quantization_type} ===")
+        print(" =======================================================================")
+
+        # name of the quantize file to generate
+        quantize_file = f"{name}_{quantization_type}.quantize"
+        current_directory = os.getcwd()
+        txt_path = current_directory + "/dataset.txt"
+        with open(txt_path, 'r', encoding='utf-8') as file:
+            num_lines = len(file.readlines())
+
+        # build and run the hybrid quantize command
+        cmd = f"{netrans} \
+            --qtype {quantized_type} \
+            --hybrid \
+            --quantizer {quantization_type.split('-')[0]} \
+            --model-quantize {quantize_file} \
+            --model {name}.json \
+            --model-data {name}.data \
+            --with-input-meta {name}_inputmeta.yml \
+            --device CPU \
+            --algorithm kl_divergence \
+            --divergence-nbins 2048 \
+            --iterations {num_lines}"
+
+        os.system(cmd)
+
+        # check the quantization result
+        if os.path.exists(quantize_file):
+            print("\033[32m QUANTIZED SUCCESS \033[0m")
+        else:
+            print("\033[31m ERROR ! \033[0m")
+ + + +
+[文档]
+def main():
+    # check the number of command-line arguments
+    if len(sys.argv) < 3:
+        print("Input a network name and quantized type ( uint8 / int8 / int16 )")
+        sys.exit(-1)
+
+    # network directory given on the command line
+    network_name = sys.argv[1]
+
+    netrans_path = os.environ['NETRANS_PATH']
+    quantize_type = sys.argv[2]
+    cla = create_cls(netrans_path, network_name, quantize_type)
+
+    # run the quantization
+    run = Quantize(cla)
+    run.quantize_network()
+ + +if __name__ == "__main__": + main() +
+ +
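+
+API-level equivalent of the main() above (names are illustrative):
+
+```py3
+import os
+from utils import create_cls
+from quantize_hb import Quantize
+
+# Illustrative: hybrid quantization of a "lenet" project as int16.
+cla = create_cls(os.environ['NETRANS_PATH'], 'lenet', 'int16')
+Quantize(cla).quantize_network()
+```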
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_modules/utils.html b/docs/html/_modules/utils.html new file mode 100644 index 0000000..5d6228a --- /dev/null +++ b/docs/html/_modules/utils.html @@ -0,0 +1,236 @@ + + + + + + + utils — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Source code for utils

+import sys
+import os
+from functools import wraps
+
+# def check_path(netrans, model_path):
+#     def decorator(func):
+#         @wraps(func)
+#         def wrapper(netrans, model_path, *args, **kargs):
+#             check_dir(model_path)
+#             check_netrans(netrans)
+#             if os.getcwd() != model_path :
+#                 os.chdir(model_path)
+#             return func(netrans, model_path, *args, **kargs)
+#         return wrapper
+#     return decorator
+
+
+[文档]
+def check_path(func):
+    """Decorator: make sure netrans runs inside the project directory."""
+    @wraps(func)
+    def wrapper(cla, *args, **kargs):
+        check_netrans(cla.netrans)
+        if os.getcwd() != cla.model_path:
+            os.chdir(cla.model_path)
+        return func(cla, *args, **kargs)
+    return wrapper
+ + + +
+[文档]
+def check_dir(network_name):
+    """Check that the project directory exists and enter it.
+
+    Args:
+        network_name (str): path of the project directory
+
+    Raises:
+        NotADirectoryError: if the project directory does not exist
+    """
+    if not os.path.exists(network_name):
+        raise NotADirectoryError(
+            f"Directory not found: {network_name}"
+        )
+    os.chdir(network_name)
+ + +
+[文档]
+def check_netrans(netrans):
+    """Check that netrans is configured.
+
+    Args:
+        netrans (str, bool): netrans path; if not configured (default False),
+            the NETRANS_PATH environment variable is consulted
+
+    Raises:
+        NotADirectoryError: if Netrans cannot be found
+    """
+    if netrans and os.path.exists(netrans):
+        return
+    if 'NETRANS_PATH' in os.environ:
+        return
+    raise NotADirectoryError(
+        f"Netrans not found: {netrans}"
+    )
+ + + +
+[文档]
+def remove_history_file(name):
+    """Remove the .json/.data files left over from a previous import."""
+    os.chdir(name)
+    if os.path.isfile(f"{name}.json"):
+        os.remove(f"{name}.json")
+    if os.path.isfile(f"{name}.data"):
+        os.remove(f"{name}.data")
+    os.chdir('..')
+ + +
+[文档] +def check_env(name): + check_dir(name)
+ +# check_netrans() + # remove_history_file(name) + + +
+[文档]
+class AttributeCopier:
+    """Copy all attributes of a Netrans-like object onto this instance."""
+    def __init__(self, source_obj) -> None:
+        self.copy_attribute_name(source_obj)
+[文档] + def copy_attribute_name(self, source_obj): + for attribute_name in self._get_attribute_names(source_obj): + setattr(self, attribute_name, getattr(source_obj, attribute_name))
+ + + @staticmethod + def _get_attribute_names(source_obj): + return source_obj.__dict__.keys()
+ + +
+[文档]
+class create_cls():  # dataclass-style stand-in for Netrans
+    """Mimic an instantiated Netrans for quick testing."""
+    def __init__(self, netrans_path, name, quantized_type='uint8', verbose=False) -> None:
+        self.netrans_path = netrans_path
+        self.netrans = os.path.join(self.netrans_path, 'pnnacc')
+        self.model_name = name
+        self.model_path = os.path.abspath(name)
+        self.verbose = verbose
+        self.quantize_type = quantized_type
+        self.profile = False
+ + + +# if __name__ == "__main__": +# dir_name = "yolo" +# os.mkdir(dir_name) +# check_dir(dir_name) + + +
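+
+A small demonstration of how create_cls and AttributeCopier fit together
+(paths are illustrative):
+
+```py3
+from utils import create_cls, AttributeCopier
+
+# create_cls fakes a Netrans instance; AttributeCopier subclasses
+# (Config, ImportModel, Quantize, Export, ...) absorb its attributes wholesale.
+cla = create_cls('netrans/bin', 'lenet', 'uint8')
+copy = AttributeCopier(cla)
+print(copy.model_name, copy.quantize_type)   # -> lenet uint8
+print(copy.netrans)                          # -> netrans/bin/pnnacc
+```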
+ +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/_sources/appendix.rst.txt b/docs/html/_sources/appendix.rst.txt new file mode 100644 index 0000000..a012aa1 --- /dev/null +++ b/docs/html/_sources/appendix.rst.txt @@ -0,0 +1,9 @@ +附录 +============= + +.. toctree:: + :maxdepth: 2 + + gen_api + modules + diff --git a/docs/html/_sources/config.rst.txt b/docs/html/_sources/config.rst.txt new file mode 100644 index 0000000..edbd331 --- /dev/null +++ b/docs/html/_sources/config.rst.txt @@ -0,0 +1,7 @@ +config module +============= + +.. automodule:: config + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/dump.rst.txt b/docs/html/_sources/dump.rst.txt new file mode 100644 index 0000000..ae53927 --- /dev/null +++ b/docs/html/_sources/dump.rst.txt @@ -0,0 +1,7 @@ +dump module +=========== + +.. automodule:: dump + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/example.rst.txt b/docs/html/_sources/example.rst.txt new file mode 100644 index 0000000..a142c2b --- /dev/null +++ b/docs/html/_sources/example.rst.txt @@ -0,0 +1,7 @@ +example module +============== + +.. automodule:: example + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/export.rst.txt b/docs/html/_sources/export.rst.txt new file mode 100644 index 0000000..3a5c46c --- /dev/null +++ b/docs/html/_sources/export.rst.txt @@ -0,0 +1,7 @@ +export module +============= + +.. automodule:: export + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/file_model.rst.txt b/docs/html/_sources/file_model.rst.txt new file mode 100644 index 0000000..e837e7a --- /dev/null +++ b/docs/html/_sources/file_model.rst.txt @@ -0,0 +1,7 @@ +file\_model module +================== + +.. automodule:: file_model + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/gen_api.md.txt b/docs/html/_sources/gen_api.md.txt new file mode 100644 index 0000000..3fab05a --- /dev/null +++ b/docs/html/_sources/gen_api.md.txt @@ -0,0 +1,136 @@ +# gen api html & pdf by sphinx + +netrans 目录结构如下 +```tree +netrans/ +│ +├── docs/ # Sphinx 项目的根目录 +│ ├── source/ # 源文件目录 +│ │ ├── _static/ # 静态文件(如图片、CSS、JS) +│ │ ├── _templates/ # 自定义模板 +│ │ ├── conf.py # 配置文件 +│ │ ├── index.rst # 主页文件 +│ │ └── my_module.rst # 其他文档文件 +│ └── build/ # 构建输出目录(生成的 HTML 文件等) +│ +└── bin/ +└── netrans_cli/ +└── netrans_py/ +``` + +1. `sphinx-quickstart docs/` 快速生成 +2. 修改 `docs/source/conf.py` , + + +### *.rst + +rst, reStructuredText 文件,用于定义文档的结构。通常放在source目录下。 + +rst 是一种和 markdown 类似的语法 + +使用目录树指令 `.. toctree::`,列出其他文档文件。 + + +## 使用 autodoc + Sphinx 实现 python api 文档(html) + +1. 修改 docs/source/conf.py +```py3 +# Configuration file for the Sphinx documentation builder. 
+# +# For the full list of built-in configuration values, see the documentation: +# https://www.sphinx-doc.org/en/master/usage/configuration.html + +# -- Project information ----------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information + +project = 'netrans' +copyright = '2025, ccyh' +author = 'xj' +release = '0.1' + +# -- General configuration --------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration + +import os +import sys + +sys.path.append('../../netrans_py/') +sys.path.append('../../') + +# Sphinx 扩展 +extensions = [ + 'sphinx.ext.autodoc', # 自动生成文档 + 'sphinx.ext.viewcode', # 添加源代码链接 + 'sphinx.ext.napoleon', # 支持 NumPy 和 Google 风格的 docstring +] + +# 主题 +html_theme = 'sphinx_rtd_theme' + +templates_path = ['_templates'] +exclude_patterns = [] + +language = 'zh' + +# -- Options for HTML output ------------------------------------------------- +# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output + +html_theme = 'alabaster' +html_static_path = ['_static'] + +source_suffix = { + '.rst': 'restructuredtext', + '.md': 'markdown', +} +``` + +2. sphinx-apidoc -o docs/source/ . +生成 netrans_py 下所有的 *.py 的rst, 并添加到index.rst里. +```text +# index.rst + +``` + +3. sphinx-build -b html docs/source docs/build + + +## 使用 autodoc + Sphinx 实现 python api 文档(pdf) +1. 在可以生成 html的 基础上, 使用make latexodf 生成 *.tex文件. +这一步会报错,原因是无法识别中文 + +2.修改 netrans.tex文件 +``` +cd build/latex +vim netrans.tex +``` + +在各种usapackage的地方新增: +``` +\usepackage[UTF8, fontset=ubuntu]{ctex} +``` + +3. 使用 xelatex 生成pdf +sphinx使用的是 xelatex 而非 pdflatex +``` +xelatex netrans.tex +``` + + + +## 常见报错 + +报错 +```log +sphinx-quickstart +Traceback (most recent call last): + File "/home/xj/app/miniforge3/envs/sphinx/bin/sphinx-quickstart", line 8, in + sys.exit(main()) + File "/home/xj/app/miniforge3/envs/sphinx/lib/python3.10/site-packages/sphinx/cmd/quickstart.py", line 721, in main + locale.setlocale(locale.LC_ALL, '') + File "/home/xj/app/miniforge3/envs/sphinx/lib/python3.10/locale.py", line 620, in setlocale + return _setlocale(category, locale) +locale.Error: unsupported locale setting +``` + +解决: +export LC_ALL=en_US.UTF-8 diff --git a/docs/html/_sources/import_model.rst.txt b/docs/html/_sources/import_model.rst.txt new file mode 100644 index 0000000..3070a3a --- /dev/null +++ b/docs/html/_sources/import_model.rst.txt @@ -0,0 +1,7 @@ +import\_model module +==================== + +.. automodule:: import_model + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/index.rst.txt b/docs/html/_sources/index.rst.txt new file mode 100644 index 0000000..b879337 --- /dev/null +++ b/docs/html/_sources/index.rst.txt @@ -0,0 +1,22 @@ +.. netrans documentation master file, created by + sphinx-quickstart on Fri Jun 27 15:04:57 2025. + You can adapt this file completely to your liking, but it should at least + contain the root `toctree` directive. + +netrans documentation +===================== +netrans 是一套针对pnna 芯片的模型处理工具,提供命令行工具 netrans_cli 和 python api netrans_py, 其核心功能是将模型权重转换成在pnna芯片上运行的 nbg(network binary graph)格式(.nb 为后缀)。 + +.. 
toctree:: + :maxdepth: 2 + :caption: Contents: + + quick_start_guide + netrans_cli + netrans_py + appendix + + + + + diff --git a/docs/html/_sources/infer.rst.txt b/docs/html/_sources/infer.rst.txt new file mode 100644 index 0000000..c4956c8 --- /dev/null +++ b/docs/html/_sources/infer.rst.txt @@ -0,0 +1,7 @@ +infer module +============ + +.. automodule:: infer + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/modules.rst.txt b/docs/html/_sources/modules.rst.txt new file mode 100644 index 0000000..9dc7f38 --- /dev/null +++ b/docs/html/_sources/modules.rst.txt @@ -0,0 +1,17 @@ +netrans_py +========== + +.. toctree:: + :maxdepth: 4 + + netrans + config + dump + example + export + file_model + import_model + infer + quantize + quantize_hb + utils diff --git a/docs/html/_sources/netrans.rst.txt b/docs/html/_sources/netrans.rst.txt new file mode 100644 index 0000000..185aa4d --- /dev/null +++ b/docs/html/_sources/netrans.rst.txt @@ -0,0 +1,7 @@ +netrans module +============== + +.. automodule:: netrans + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/netrans_cli.md.txt b/docs/html/_sources/netrans_cli.md.txt new file mode 100644 index 0000000..fac7df1 --- /dev/null +++ b/docs/html/_sources/netrans_cli.md.txt @@ -0,0 +1,192 @@ +# netrans_cli 使用 + +netrans_cli 是 netrans 进行模型转换的命令行工具,使用 ntrans_cli 完成模型转换的步骤如下: + +1. 导入模型 +2. 生成并修改前处理配置文件 *_inputmeta.yml +3. 量化模型 +4. 导出模型 + +## netrans_cli 脚本 + +|脚本|功能|使用| +|:---|---|---| +|load.sh| 模型导入功能,将模型转换成 Pnna 支持的格式| load.sh model_name| +|config.sh| 预处理模版生成功能,生成预处理模版,根据模型进行对于的修改| config.sh model_name| +|quantize.sh| 量化功能, 对模型进行量化生成量化参数文件| quantize.sh model_name quantize_data_type| +|export.sh|导出功能,将量化好的模型导出成 Pnna 上可以运行的runtime| export.sh model_name quantize_data_type| + +对于不同框架下训练的模型,需要准备不同的数据,所有的数据都需要与模型放在同一个文件夹下,模型文件名和文件夹名需要保持一致。 + +## load.sh 模型导入 + +使用 load.sh 导入模型 + +- 用法: load.sh 以模型文件名命名的模型数据文件夹,例如: + + ```bash + load.sh lenet + ``` + + "lenet"是文件夹名,也作为模型名和权重文件名。导入会打印相关日志信息,成功后会打印SUCESS。导入后lenet文件夹应该有"lenet.json"和"lenet.data"文件: + + ```bash + $ ls -lrt lenet + total 3396 + -rwxr-xr-x 1 hope hope 1727201 Nov 5 2018 lenet.pb + -rw-r--r-- 1 hope hope 553 Nov 5 2018 0.jpg + -rwxr--r-- 1 hope hope 6 Apr 21 17:04 dataset.txt + -rw-rw-r-- 1 hope hope 69 Jun 7 09:19 inputs_outputs.txt + -rw-r--r-- 1 hope hope 5553 Jun 7 09:21 lenet.json + -rw-r--r-- 1 hope hope 1725178 Jun 7 09:21 lenet.data + ``` + +## config.sh 预处理配置文件生成 + +使用 config.sh 生成 inputmeta 文件 + +- config.sh 以模型文件名命名的模型数据文件夹,例如: + + ```bash + config.sh lenet + ``` + + inputmeta 文件生成会打印相关日志信息,成功后会打印SUCESS。导入后lenet文件夹应该有 "lenet_inputmeta.yml" 文件: + + ```shell + $ ls -lrt lenet + total 3400 + -rwxr-xr-x 1 hope hope 1727201 Nov 5 2018 lenet.pb + -rw-r--r-- 1 hope hope 553 Nov 5 2018 0.jpg + -rwxr--r-- 1 hope hope 6 Apr 21 17:04 dataset.txt + -rw-rw-r-- 1 hope hope 69 Jun 7 09:19 inputs_outputs.txt + -rw-r--r-- 1 hope hope 5553 Jun 7 09:21 lenet.json + -rw-r--r-- 1 hope hope 1725178 Jun 7 09:21 lenet.data + -rw-r--r-- 1 hope hope 948 Jun 7 09:35 lenet_inputmeta.yml + ``` + + 可以看到,最终生成的是*.yml文件,该文件用于为Netrans中间模型配置输入层数据集合。Netrans中的量化、推理、导出和图片转dat的操作都需要用到这个文件。因此,此步骤不可跳过。 + +Inputmeta.yml文件结构如下: + +```yaml +%YAML 1.2 +--- +# !!!This file disallow TABs!!! 
+# "category" allowed values: "image, undefined" +# "database" allowed types: "H5FS, SQLITE, TEXT, LMDB, NPY, GENERATOR" +# "tensor_name" only support in H5FS database +# "preproc_type" allowed types:"IMAGE_RGB, IMAGE_RGB888_PLANAR, IMAGE_RGB888_PLANAR_SEP, +IMAGE_I420, +# IMAGE_NV12, IMAGE_YUV444, IMAGE_GRAY, IMAGE_BGRA, TENSOR" +input_meta: + databases: + - path: dataset.txt + type: TEXT + ports: + - lid: data_0 + category: image + dtype: float32 + sparse: false + tensor_name: + layout: nhwc + shape: + - 50 + - 224 + - 224 + - 3 + preprocess: + reverse_channel: false + mean: + - 103.94 + - 116.78 + - 123.67 + scale: 0.017 + preproc_node_params: + preproc_type: IMAGE_RGB + add_preproc_node: false + preproc_perm: + - 0 + - 1 + - 2 + - 3 + - lid: label_0 + redirect_to_output: true + category: undefined + tensor_name: + dtype: float32 + shape: + - 1 + - 1 +``` + +上面示例文件的各个参数解释: + +```{table} +:widths: 20, 80 +:align: left +| 参数 | 说明 | +| :--- | --- | +| input_meta | 预处理参数配置申明。 | +| databases | 数据配置,包括设置 path、type 和 ports 。| +| path | 数据集文件的相对(执行目录)或绝对路径。默认为 dataset.txt, 不建议修改。 | +| type | 数据集文件格式,固定为TEXT。 | +| ports | 指向网络中的输入或重定向的输入,目前只支持一个输入,如果网络存在多个输入,请与@ccyh联系。 | +| lid | 输入层的lid | +| category | 输入的类别。将此参数设置为以下值之一:image(图像输入)或 undefined(其他类型的输入)。 | +| dtype | 输入张量的数据类型,用于将数据发送到 Pnna 网络的输入端口。支持的数据类型包括 float32 和 quantized。 | +| sparse | 指定网络张量是否以稀疏格式存在。将此参数设置为以下值之一:true(稀疏格式)或 false(压缩格式)。 | +| tensor_name | 留空此参数 | +| layout | 输入张量的格式,使用 nchw 用于 Caffe、Darknet、ONNX 和 PyTorch 模型。使用 nhwc 用于 TensorFlow、TensorFlow Lite 和 Keras 模型。 | +| shape | 此张量的形状。第一维,shape[0],表示每批的输入数量,允许在一次推理操作之前将多个输入发送到网络。如果batch维度设置为0,则需要从命令行指定--batch-size。如果 batch维度设置为大于1的值,则直接使用inputmeta.yml中的batch size并忽略命令行中的--batch-size。 | +| fitting | 保留字段 | +| preprocess | 预处理步骤和顺序。预处理支持下面的四个键,键的顺序代表预处理的顺序。您可以相应地调整顺序。 | +| reverse_channel | 指定是否保留通道顺序。将此参数设置为以下值之一:true(保留通道顺序)或 false(不保留通道顺序)。对于 TensorFlow 和 TensorFlow Lite 框架的模型使用 true。 | +| mean | 用于每个通道的均值。 | +| scale | 张量的缩放值。均值和缩放值用于根据公式 (inputTensor - mean) × scale 归一化输入张量。| +| preproc_node_params | 预处理节点参数,在 OVxlib C 项目案例中启用预处理任务 | +| add_preproc_node | 用于处理 OVxlib C 项目案例中预处理节点的插入。[true, false] 中的布尔值,表示通过配置以下参数将预处理层添加到导出的应用程序中。此参数仅在 add_preproc_node 参数设置为 true 时有效。| +| preproc_type | 预处理节点输入类型。 [IMAGE_RGB, IMAGE_RGB888_PLANAR,IMAGE_YUV420, IMAGE_GRAY, IMAGE_BGRA, TENSOR] 中的字符串值 | +| preproc_perm | 预处理节点输入的置换参数。 | +| redirect_to_output | 将database张量重定向到图形输出的特殊属性。如果为该属性设置了一个port,网络构建器将自动为该port生成一个输出层,以便后处理文件可以直接处理来自database的张量。 如果使用网络进行分类,则上例中的lid“input_0”表示输入数据集的标签lid。 您可以设置其他名称来表示标签的lid。 请注意,redirect_to_output 必须设置为 true,以便后处理文件可以直接处理来自database的张量。 标签的lid必须与后处理文件中定义的 labels_tensor 的lid相同。 [true, false] 中的布尔值。 指定是否将由张量表示的输入端口的数据直接发送到网络输出。true(直接发送到网络输出)或 false(不直接发送到网络输出)| +``` + +可以根据实际情况对生成的inputmeta文件进行修改。 + +## quantize.sh 模型量化 + +如果我们训练好的模型的数据类型是float32的,为了使模型以更高的效率在Pnna上运行,我们可以对模型进行量化操作,量化操作可能会带来一定程度的精度损失。 + +- 在netrans_cli目录下使用quantize.sh脚本进行量化操作。 + +用法:./quantize.sh 以模型文件名命名的模型数据文件夹 量化类型,例如: + +```bash +quantize.sh lenet uint8 +``` + +支持的量化类型有:uint8、int8、int16 + +## export.sh 模型导出 + +使用 export.sh 导出模型生成nbg文件。 + +用法:export.sh 以模型文件名命名的模型数据文件夹 数据类型,例如: + +```bash +export.sh lenet uint8 +``` + +导出支持的数据类型:float、uint8、int8、int16,其中使用uint8、int8、int16导出时需要先进行模型量化。导出的工程会在模型所在的目录下面的wksp目录里。 +network_binary.nb文件在"asymmetric_affine"文件夹中: + +```shell +ls -lrt lenet/wksp/asymmetric_affine/ +-rw-r--r-- 1 hope hope 694912 Jun 7 09:55 network_binary.nb +``` + +目前支持将生成的network_binary.nb文件部署到Pnna硬件平台。具体部署方法请参阅模型部署相关文档。 + +## 使用示例 + +请参照examples,examples 提供 [caffe 
模型转换示例](./examples/caffe_model.md),[darknet 模型转换示例](./examples/darknet_model.md),[tensorflow 模型转换示例](./examples/tensorflow_model.md),[onnx 模型转换示例](./examples/onnx_model.md)。
diff --git a/docs/html/_sources/netrans_py.md.txt b/docs/html/_sources/netrans_py.md.txt
new file mode 100644
index 0000000..654ea87
--- /dev/null
+++ b/docs/html/_sources/netrans_py.md.txt
@@ -0,0 +1,167 @@
+# netrans_py 使用
+
+netrans_py 为 Netrans 编译器的 python 调用接口。
+使用 netrans_py 完成模型转换的步骤如下:
+
+1. 导入模型
+2. 生成并修改前处理配置文件 *_inputmeta.yml
+3. 量化模型
+4. 导出模型
+
+## Netrans 类
+
+创建 Netrans
+
+描述: 实例化 Netrans 类。
+代码示例:
+
+```py3
+from netrans import Netrans
+yolo_netrans = Netrans("../examples/darknet/yolov4_tiny")
+```
+
+参数
+
+| 参数名 | 类型 | 说明 |
+|:---| -- | -- |
+|model_path| str| 第一位置参数,模型文件的路径|
+|netrans| str | 如果 NETRANS_PATH 没有设置,可通过该参数指定netrans的路径|
+
+输出返回:
+无。
+
+## Netrans.import 模型导入
+
+描述: 将模型转换成 Pnna 支持的格式。
+代码示例:
+
+```py3
+# "import" 是 Python 关键字,无法以属性语法直接调用,
+# 这里通过 getattr 调用文档中声明的方法名
+getattr(yolo_netrans, "import")()
+```
+
+参数:
+无。
+
+输出返回:
+无。
+在工程目录下生成 Pnna 支持的模型格式,以.json结尾的模型文件和 .data结尾的权重文件。
+
+## Netrans.config 预处理配置文件生成
+
+描述: 生成前处理配置文件模板 *_inputmeta.yml。
+代码示例:
+
+```py3
+yolo_netrans.config()
+```
+
+参数:
+
+```{table}
+:widths: 20, 30, 50
+:align: left
+| 参数名 | 类型 | 说明 |
+|:---| -- | -- |
+|inputmeta| bool,str, [False, True, "inputmeta_filepath"] | 指定 inputmeta, 默认为False。<br>
如果为False,则会生成inputmeta模板,可使用mean、scale、reverse_channel 配合修改常用参数。
如果已有现成的 inputmeta 文件,则可通过该参数进行指定,也可使用True, 则会自动索引 model_name_inputmeta.yml |
+|mean| float, int, list | 设置预处理中 normalize 的 mean 参数 |
+|scale| float, int, list | 设置预处理中 normalize 的 scale 参数 |
+|reverse_channel | bool | 设置预处理中的 reverse_channel 参数 |
+```
+
+输出返回:
+无。
+
+## Netrans.quantize 模型量化
+
+描述: 对模型生成量化配置文件。
+代码示例:
+
+```py3
+yolo_netrans.quantize("uint8")
+```
+
+参数:
+
+```{table}
+:widths: 20, 30, 50
+:align: left
+| 参数名 | 类型 | 说明 |
+|:---| -- | -- |
+|quantize_type| str| 第一位置参数,模型量化类型,仅支持 "uint8", "int8", "int16"|
+```
+
+输出返回:
+无。
+
+## Netrans.export 模型导出
+
+描述: 导出模型,生成可在 Pnna 上运行的 nbg 文件。
+代码示例:
+
+```py3
+yolo_netrans.export()
+```
+
+参数:
+无。
+
+输出返回:
+无。请在目录 “wksp/*/” 下检查是否生成nbg文件。
+
+## Netrans.model2nbg 模型生成nbg文件
+
+描述: 模型导入、量化、及 nbg 文件生成。
+代码示例:
+
+```py3
+# 无预处理
+yolo_netrans.model2nbg(quantize_type='uint8')
+# 需要对数据进行 normalize, mean 为 128, scale 为 0.0039
+yolo_netrans.model2nbg(quantize_type='uint8', mean=128, scale=0.0039)
+# 需要对数据分通道进行 normalize, mean 为 128,127,125, scale 为 0.0039, 且 reverse_channel 为 True
+yolo_netrans.model2nbg(quantize_type='uint8', mean=[128, 127, 125], scale=0.0039, reverse_channel=True)
+# 已经进行初始化设置
+yolo_netrans.model2nbg(quantize_type='uint8', inputmeta=True)
+```
+
+参数
+
+```{table}
+:widths: 20, 30, 50
+:align: left
+| 参数名 | 类型 | 说明 |
+|:---| -- | -- |
+|quantize_type| str, ["uint8", "int8", "int16" ] | 量化类型,将模型量化成该参数指定的类型 |
+|inputmeta| bool,str, [False, True, "inputmeta_filepath"] | 指定 inputmeta, 默认为False。<br>
如果为False,则会生成inputmeta模板,可使用mean、scale、reverse_channel 配合修改常用参数。
如果已有现成的 inputmeta 文件,则可通过该参数进行指定,也可使用True, 则会自动索引 model_name_inputmeta.yml | +|mean| float, int, list | 设置预处理中 normalize 的 mean 参数 | +|scale| float, int, list | 设置预处理中 normalize 的 scale 参数 | +|reverse_channel | bool | 设置预处理中的 reverse_channel 参数 | +``` + +输出返回: +请在目录 “wksp/*/” 下检查是否生成nbg文件。 + +## 使用示例 + + ```py3 +from nertans import Netrans +model_path = 'example/darknet/yolov4_tiny' +netrans_path = "netrans/bin" # 如果进行了export定义申明,这一步可以不用 + +# 初始化netrans +net = Netrans(model_path,netrans=netrans_path) +# 模型载入 +net.import() +# 配置预处理 normlize 的参数 +net.config(scale=1,mean=0) +# 模型量化 +net.quantize("uint8") +# 模型导出 +net.export() + +# 模型直接量化成 int16 并导出, 直接复用刚配置好的 inputmeta +net.model2nbg(quantize_type = "int16", inputmeta=True) +``` diff --git a/docs/html/_sources/quantize.rst.txt b/docs/html/_sources/quantize.rst.txt new file mode 100644 index 0000000..e90888d --- /dev/null +++ b/docs/html/_sources/quantize.rst.txt @@ -0,0 +1,7 @@ +quantize module +=============== + +.. automodule:: quantize + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/quantize_hb.rst.txt b/docs/html/_sources/quantize_hb.rst.txt new file mode 100644 index 0000000..173d001 --- /dev/null +++ b/docs/html/_sources/quantize_hb.rst.txt @@ -0,0 +1,7 @@ +quantize\_hb module +=================== + +.. automodule:: quantize_hb + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/quick_start_guide.md.txt b/docs/html/_sources/quick_start_guide.md.txt new file mode 100644 index 0000000..338720a --- /dev/null +++ b/docs/html/_sources/quick_start_guide.md.txt @@ -0,0 +1,160 @@ +# 快速入门 + +本文档以 onnx 格式的 yolov5s 为例,演示如何快速安装Nertans 并使用 Netrans 量化、编译模型并生成 nbg 文件。 + +## 系统环境 + +- Linux操作系统,推荐 Ubuntu 20.04 或 Debian12 +- Python 3.8 +- RAM 至少 8GB + +## 安装Netrans + +创建 python3.8 环境 + +```bash +wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh" +mkdir -p ~/app +INSTALL_PATH="${HOME}/app/miniforge3" +bash Miniforge3-Linux-x86_64.sh -b -p ${INSTALL_PATH} +echo "source "${INSTALL_PATH}/etc/profile.d/conda.sh"" >> ${HOME}/.bashrc +echo "source "${INSTALL_PATH}/etc/profile.d/mamba.sh"" >> ${HOME}/.bashrc +source ${HOME}/.bashrc +mamba create -n netrans python=3.8 -y +mamba activate netrans +``` + +下载 Netrans + +```bash +cd ~/app +git clone https://gitlink.org.cn/nudt_dsp/netrans.git +``` + +配置 Netrans + +```bash +cd ~/app/netrans +./setup.sh +``` + +## 使用 Netrans 编译 yolov5s 模型 + +进入工作目录 + +```bash +cd ~/app/netrans/examples/onnx +``` + +此时目录如下: + +```text +onnx/ +├── README.md +└── yolov5s + ├── 0.jpg + ├── dataset.txt + └── yolov5s.onnx +``` + +### 使用 netrans_cli 编译 yolov5s + +#### 导入模型 + +```bash +load.sh yolov5s +``` + +该命令会在工程目录下生成包含模型信息的 .json 和 .data 数据文件。 + +此时 yolov5s 的目录结构如下 + +```text +yolov5s/ +├── 0.jpg +├── yolov5s.data +├── yolov5s.json +└── yolov5s.onnx +``` + +#### 生成配置文件模板 + +配置文件定义输入数据前处理相关参数。Netrans预定义了配置文件模板生成脚本,用户需根据模型前处理参数对配置文件进行修改。 + +```bash +config.sh yolov5s +``` + +此时 yolov5s 的目录结构如下: + +```text +yolov5s/ +├── 0.jpg +├── dataset.txt +├── yolov5s.data +├── yolov5s_inputmeta.yml +├── yolov5s.json +└── yolov5s.onnx + +``` + +根据 yolov5s 的前处理参数 ,修改 yml 中的 scale 为 0.003921568627。 +打开 ` yolov5s_inputmeta.yml ` 文件,修改第30-33行: + +```text + scale: + - 0.003921568627 + - 0.003921568627 + - 0.003921568627 +``` + +#### 量化模型 + +生成 unit8 量化的量化参数文件 + +```bash +quantize.sh yolov5s uint8 +``` + +此时 yolov5s 的目录结构如下: + +```text +yolov5s/ +├── 0.jpg +├── dataset.txt +├── yolov5s_asymmetric_affine.quantize +├── yolov5s.data +├── 
yolov5s_inputmeta.yml +├── yolov5s.json +└── yolov5s.onnx +``` + +#### 导出模型 + +导出 unit8 量化的模型项目工程 + +```bash +export.sh yolov5s uint8 +``` + +此时 yolov5s 的目录结构如下: + +```text +yolov5s/ +├── 0.jpg +├── dataset.txt +├── wksp +│ └── asymmetric_affine +│ └── network_binary.nb +├── yolov5s_asymmetric_affine.quantize +├── yolov5s.data +├── yolov5s_inputmeta.yml +├── yolov5s.json +└── yolov5s.onnx +``` + +### 使用 netrans_py 编译 yolov5s 模型 + +```bash +example.py yolov5s -q uint8 -m 0 -s 0.003921568627 +``` diff --git a/docs/html/_sources/setup.rst.txt b/docs/html/_sources/setup.rst.txt new file mode 100644 index 0000000..1084cc6 --- /dev/null +++ b/docs/html/_sources/setup.rst.txt @@ -0,0 +1,7 @@ +setup module +============ + +.. automodule:: setup + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_sources/utils.rst.txt b/docs/html/_sources/utils.rst.txt new file mode 100644 index 0000000..fe1efad --- /dev/null +++ b/docs/html/_sources/utils.rst.txt @@ -0,0 +1,7 @@ +utils module +============ + +.. automodule:: utils + :members: + :show-inheritance: + :undoc-members: diff --git a/docs/html/_static/alabaster.css b/docs/html/_static/alabaster.css new file mode 100644 index 0000000..7e75bf8 --- /dev/null +++ b/docs/html/_static/alabaster.css @@ -0,0 +1,663 @@ +/* -- page layout ----------------------------------------------------------- */ + +body { + font-family: Georgia, serif; + font-size: 17px; + background-color: #fff; + color: #000; + margin: 0; + padding: 0; +} + + +div.document { + width: 940px; + margin: 30px auto 0 auto; +} + +div.documentwrapper { + float: left; + width: 100%; +} + +div.bodywrapper { + margin: 0 0 0 220px; +} + +div.sphinxsidebar { + width: 220px; + font-size: 14px; + line-height: 1.5; +} + +hr { + border: 1px solid #B1B4B6; +} + +div.body { + background-color: #fff; + color: #3E4349; + padding: 0 30px 0 30px; +} + +div.body > .section { + text-align: left; +} + +div.footer { + width: 940px; + margin: 20px auto 30px auto; + font-size: 14px; + color: #888; + text-align: right; +} + +div.footer a { + color: #888; +} + +p.caption { + font-family: inherit; + font-size: inherit; +} + + +div.relations { + display: none; +} + + +div.sphinxsidebar { + max-height: 100%; + overflow-y: auto; +} + +div.sphinxsidebar a { + color: #444; + text-decoration: none; + border-bottom: 1px dotted #999; +} + +div.sphinxsidebar a:hover { + border-bottom: 1px solid #999; +} + +div.sphinxsidebarwrapper { + padding: 18px 10px; +} + +div.sphinxsidebarwrapper p.logo { + padding: 0; + margin: -10px 0 0 0px; + text-align: center; +} + +div.sphinxsidebarwrapper h1.logo { + margin-top: -10px; + text-align: center; + margin-bottom: 5px; + text-align: left; +} + +div.sphinxsidebarwrapper h1.logo-name { + margin-top: 0px; +} + +div.sphinxsidebarwrapper p.blurb { + margin-top: 0; + font-style: normal; +} + +div.sphinxsidebar h3, +div.sphinxsidebar h4 { + font-family: Georgia, serif; + color: #444; + font-size: 24px; + font-weight: normal; + margin: 0 0 5px 0; + padding: 0; +} + +div.sphinxsidebar h4 { + font-size: 20px; +} + +div.sphinxsidebar h3 a { + color: #444; +} + +div.sphinxsidebar p.logo a, +div.sphinxsidebar h3 a, +div.sphinxsidebar p.logo a:hover, +div.sphinxsidebar h3 a:hover { + border: none; +} + +div.sphinxsidebar p { + color: #555; + margin: 10px 0; +} + +div.sphinxsidebar ul { + margin: 10px 0; + padding: 0; + color: #000; +} + +div.sphinxsidebar ul li.toctree-l1 > a { + font-size: 120%; +} + +div.sphinxsidebar ul li.toctree-l2 > a { + font-size: 110%; +} + 
+div.sphinxsidebar input { + border: 1px solid #CCC; + font-family: Georgia, serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox { + margin: 1em 0; +} + +div.sphinxsidebar .search > div { + display: table-cell; +} + +div.sphinxsidebar hr { + border: none; + height: 1px; + color: #AAA; + background: #AAA; + + text-align: left; + margin-left: 0; + width: 50%; +} + +div.sphinxsidebar .badge { + border-bottom: none; +} + +div.sphinxsidebar .badge:hover { + border-bottom: none; +} + +/* To address an issue with donation coming after search */ +div.sphinxsidebar h3.donation { + margin-top: 10px; +} + +/* -- body styles ----------------------------------------------------------- */ + +a { + color: #004B6B; + text-decoration: underline; +} + +a:hover { + color: #6D4100; + text-decoration: underline; +} + +div.body h1, +div.body h2, +div.body h3, +div.body h4, +div.body h5, +div.body h6 { + font-family: Georgia, serif; + font-weight: normal; + margin: 30px 0px 10px 0px; + padding: 0; +} + +div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; } +div.body h2 { font-size: 180%; } +div.body h3 { font-size: 150%; } +div.body h4 { font-size: 130%; } +div.body h5 { font-size: 100%; } +div.body h6 { font-size: 100%; } + +a.headerlink { + color: #DDD; + padding: 0 4px; + text-decoration: none; +} + +a.headerlink:hover { + color: #444; + background: #EAEAEA; +} + +div.body p, div.body dd, div.body li { + line-height: 1.4em; +} + +div.admonition { + margin: 20px 0px; + padding: 10px 30px; + background-color: #EEE; + border: 1px solid #CCC; +} + +div.admonition tt.xref, div.admonition code.xref, div.admonition a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fafafa; +} + +div.admonition p.admonition-title { + font-family: Georgia, serif; + font-weight: normal; + font-size: 24px; + margin: 0 0 10px 0; + padding: 0; + line-height: 1; +} + +div.admonition p.last { + margin-bottom: 0; +} + +dt:target, .highlight { + background: #FAF3E8; +} + +div.warning { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.danger { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.error { + background-color: #FCC; + border: 1px solid #FAA; + -moz-box-shadow: 2px 2px 4px #D52C2C; + -webkit-box-shadow: 2px 2px 4px #D52C2C; + box-shadow: 2px 2px 4px #D52C2C; +} + +div.caution { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.attention { + background-color: #FCC; + border: 1px solid #FAA; +} + +div.important { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.note { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.tip { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.hint { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.seealso { + background-color: #EEE; + border: 1px solid #CCC; +} + +div.topic { + background-color: #EEE; +} + +p.admonition-title { + display: inline; +} + +p.admonition-title:after { + content: ":"; +} + +pre, tt, code { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; + font-size: 0.9em; +} + +.hll { + background-color: #FFC; + margin: 0 -12px; + padding: 0 12px; + display: block; +} + +img.screenshot { +} + +tt.descname, tt.descclassname, code.descname, code.descclassname { + font-size: 0.95em; +} + +tt.descname, code.descname { + padding-right: 0.08em; +} + +img.screenshot { + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 
2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils { + border: 1px solid #888; + -moz-box-shadow: 2px 2px 4px #EEE; + -webkit-box-shadow: 2px 2px 4px #EEE; + box-shadow: 2px 2px 4px #EEE; +} + +table.docutils td, table.docutils th { + border: 1px solid #888; + padding: 0.25em 0.7em; +} + +table.field-list, table.footnote { + border: none; + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + +table.footnote { + margin: 15px 0; + width: 100%; + border: 1px solid #EEE; + background: #FDFDFD; + font-size: 0.9em; +} + +table.footnote + table.footnote { + margin-top: -15px; + border-top: none; +} + +table.field-list th { + padding: 0 0.8em 0 0; +} + +table.field-list td { + padding: 0; +} + +table.field-list p { + margin-bottom: 0.8em; +} + +/* Cloned from + * https://github.com/sphinx-doc/sphinx/commit/ef60dbfce09286b20b7385333d63a60321784e68 + */ +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +table.footnote td.label { + width: .1px; + padding: 0.3em 0 0.3em 0.5em; +} + +table.footnote td { + padding: 0.3em 0.5em; +} + +dl { + margin-left: 0; + margin-right: 0; + margin-top: 0; + padding: 0; +} + +dl dd { + margin-left: 30px; +} + +blockquote { + margin: 0 0 0 30px; + padding: 0; +} + +ul, ol { + /* Matches the 30px from the narrow-screen "li > ul" selector below */ + margin: 10px 0 10px 30px; + padding: 0; +} + +pre { + background: unset; + padding: 7px 30px; + margin: 15px 0px; + line-height: 1.3em; +} + +div.viewcode-block:target { + background: #ffd; +} + +dl pre, blockquote pre, li pre { + margin-left: 0; + padding-left: 30px; +} + +tt, code { + background-color: #ecf0f3; + color: #222; + /* padding: 1px 2px; */ +} + +tt.xref, code.xref, a tt { + background-color: #FBFBFB; + border-bottom: 1px solid #fff; +} + +a.reference { + text-decoration: none; + border-bottom: 1px dotted #004B6B; +} + +a.reference:hover { + border-bottom: 1px solid #6D4100; +} + +/* Don't put an underline on images */ +a.image-reference, a.image-reference:hover { + border-bottom: none; +} + +a.footnote-reference { + text-decoration: none; + font-size: 0.7em; + vertical-align: top; + border-bottom: 1px dotted #004B6B; +} + +a.footnote-reference:hover { + border-bottom: 1px solid #6D4100; +} + +a:hover tt, a:hover code { + background: #EEE; +} + +@media screen and (max-width: 940px) { + + body { + margin: 0; + padding: 20px 30px; + } + + div.documentwrapper { + float: none; + background: #fff; + margin-left: 0; + margin-top: 0; + margin-right: 0; + margin-bottom: 0; + } + + div.sphinxsidebar { + display: block; + float: none; + width: unset; + margin: 50px -30px -20px -30px; + padding: 10px 20px; + background: #333; + color: #FFF; + } + + div.sphinxsidebar h3, div.sphinxsidebar h4, div.sphinxsidebar p, + div.sphinxsidebar h3 a { + color: #fff; + } + + div.sphinxsidebar a { + color: #AAA; + } + + div.sphinxsidebar p.logo { + display: none; + } + + div.document { + width: 100%; + margin: 0; + } + + div.footer { + display: none; + } + + div.bodywrapper { + margin: 0; + } + + div.body { + min-height: 0; + min-width: auto; /* fixes width on small screens, breaks .hll */ + padding: 0; + } + + .hll { + /* "fixes" the breakage */ + width: max-content; + } + + .rtd_doc_footer { + display: none; + } + + .document { + width: auto; + } + + .footer { + width: auto; + } + + .github { + display: none; + } + + ul { + margin-left: 0; + } + + li > ul { + /* Matches the 30px from the "ul, ol" selector above */ + margin-left: 
30px; + } +} + + +/* misc. */ + +.revsys-inline { + display: none!important; +} + +/* Hide ugly table cell borders in ..bibliography:: directive output */ +table.docutils.citation, table.docutils.citation td, table.docutils.citation th { + border: none; + /* Below needed in some edge cases; if not applied, bottom shadows appear */ + -moz-box-shadow: none; + -webkit-box-shadow: none; + box-shadow: none; +} + + +/* relbar */ + +.related { + line-height: 30px; + width: 100%; + font-size: 0.9rem; +} + +.related.top { + border-bottom: 1px solid #EEE; + margin-bottom: 20px; +} + +.related.bottom { + border-top: 1px solid #EEE; +} + +.related ul { + padding: 0; + margin: 0; + list-style: none; +} + +.related li { + display: inline; +} + +nav#rellinks { + float: right; +} + +nav#rellinks li+li:before { + content: "|"; +} + +nav#breadcrumbs li+li:before { + content: "\00BB"; +} + +/* Hide certain items when printing */ +@media print { + div.related { + display: none; + } +} + +img.github { + position: absolute; + top: 0; + border: 0; + right: 0; +} \ No newline at end of file diff --git a/docs/html/_static/basic.css b/docs/html/_static/basic.css new file mode 100644 index 0000000..0028826 --- /dev/null +++ b/docs/html/_static/basic.css @@ -0,0 +1,906 @@ +/* + * Sphinx stylesheet -- basic theme. + */ + +/* -- main layout ----------------------------------------------------------- */ + +div.clearer { + clear: both; +} + +div.section::after { + display: block; + content: ''; + clear: left; +} + +/* -- relbar ---------------------------------------------------------------- */ + +div.related { + width: 100%; + font-size: 90%; +} + +div.related h3 { + display: none; +} + +div.related ul { + margin: 0; + padding: 0 0 0 10px; + list-style: none; +} + +div.related li { + display: inline; +} + +div.related li.right { + float: right; + margin-right: 5px; +} + +/* -- sidebar --------------------------------------------------------------- */ + +div.sphinxsidebarwrapper { + padding: 10px 5px 0 10px; +} + +div.sphinxsidebar { + float: left; + width: 230px; + margin-left: -100%; + font-size: 90%; + word-wrap: break-word; + overflow-wrap : break-word; +} + +div.sphinxsidebar ul { + list-style: none; +} + +div.sphinxsidebar ul ul, +div.sphinxsidebar ul.want-points { + margin-left: 20px; + list-style: square; +} + +div.sphinxsidebar ul ul { + margin-top: 0; + margin-bottom: 0; +} + +div.sphinxsidebar form { + margin-top: 10px; +} + +div.sphinxsidebar input { + border: 1px solid #98dbcc; + font-family: sans-serif; + font-size: 1em; +} + +div.sphinxsidebar #searchbox form.search { + overflow: hidden; +} + +div.sphinxsidebar #searchbox input[type="text"] { + float: left; + width: 80%; + padding: 0.25em; + box-sizing: border-box; +} + +div.sphinxsidebar #searchbox input[type="submit"] { + float: left; + width: 20%; + border-left: none; + padding: 0.25em; + box-sizing: border-box; +} + + +img { + border: 0; + max-width: 100%; +} + +/* -- search page ----------------------------------------------------------- */ + +ul.search { + margin-top: 10px; +} + +ul.search li { + padding: 5px 0; +} + +ul.search li a { + font-weight: bold; +} + +ul.search li p.context { + color: #888; + margin: 2px 0 0 30px; + text-align: left; +} + +ul.keywordmatches li.goodmatch a { + font-weight: bold; +} + +/* -- index page ------------------------------------------------------------ */ + +table.contentstable { + width: 90%; + margin-left: auto; + margin-right: auto; +} + +table.contentstable p.biglink { + line-height: 150%; +} + +a.biglink { + 
font-size: 1.3em; +} + +span.linkdescr { + font-style: italic; + padding-top: 5px; + font-size: 90%; +} + +/* -- general index --------------------------------------------------------- */ + +table.indextable { + width: 100%; +} + +table.indextable td { + text-align: left; + vertical-align: top; +} + +table.indextable ul { + margin-top: 0; + margin-bottom: 0; + list-style-type: none; +} + +table.indextable > tbody > tr > td > ul { + padding-left: 0em; +} + +table.indextable tr.pcap { + height: 10px; +} + +table.indextable tr.cap { + margin-top: 10px; + background-color: #f2f2f2; +} + +img.toggler { + margin-right: 3px; + margin-top: 3px; + cursor: pointer; +} + +div.modindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +div.genindex-jumpbox { + border-top: 1px solid #ddd; + border-bottom: 1px solid #ddd; + margin: 1em 0 1em 0; + padding: 0.4em; +} + +/* -- domain module index --------------------------------------------------- */ + +table.modindextable td { + padding: 2px; + border-collapse: collapse; +} + +/* -- general body styles --------------------------------------------------- */ + +div.body { + min-width: inherit; + max-width: 800px; +} + +div.body p, div.body dd, div.body li, div.body blockquote { + -moz-hyphens: auto; + -ms-hyphens: auto; + -webkit-hyphens: auto; + hyphens: auto; +} + +a.headerlink { + visibility: hidden; +} + +a:visited { + color: #551A8B; +} + +h1:hover > a.headerlink, +h2:hover > a.headerlink, +h3:hover > a.headerlink, +h4:hover > a.headerlink, +h5:hover > a.headerlink, +h6:hover > a.headerlink, +dt:hover > a.headerlink, +caption:hover > a.headerlink, +p.caption:hover > a.headerlink, +div.code-block-caption:hover > a.headerlink { + visibility: visible; +} + +div.body p.caption { + text-align: inherit; +} + +div.body td { + text-align: left; +} + +.first { + margin-top: 0 !important; +} + +p.rubric { + margin-top: 30px; + font-weight: bold; +} + +img.align-left, figure.align-left, .figure.align-left, object.align-left { + clear: left; + float: left; + margin-right: 1em; +} + +img.align-right, figure.align-right, .figure.align-right, object.align-right { + clear: right; + float: right; + margin-left: 1em; +} + +img.align-center, figure.align-center, .figure.align-center, object.align-center { + display: block; + margin-left: auto; + margin-right: auto; +} + +img.align-default, figure.align-default, .figure.align-default { + display: block; + margin-left: auto; + margin-right: auto; +} + +.align-left { + text-align: left; +} + +.align-center { + text-align: center; +} + +.align-default { + text-align: center; +} + +.align-right { + text-align: right; +} + +/* -- sidebars -------------------------------------------------------------- */ + +div.sidebar, +aside.sidebar { + margin: 0 0 0.5em 1em; + border: 1px solid #ddb; + padding: 7px; + background-color: #ffe; + width: 40%; + float: right; + clear: right; + overflow-x: auto; +} + +p.sidebar-title { + font-weight: bold; +} + +nav.contents, +aside.topic, +div.admonition, div.topic, blockquote { + clear: left; +} + +/* -- topics ---------------------------------------------------------------- */ + +nav.contents, +aside.topic, +div.topic { + border: 1px solid #ccc; + padding: 7px; + margin: 10px 0 10px 0; +} + +p.topic-title { + font-size: 1.1em; + font-weight: bold; + margin-top: 10px; +} + +/* -- admonitions ----------------------------------------------------------- */ + +div.admonition { + margin-top: 10px; + margin-bottom: 10px; + padding: 
7px; +} + +div.admonition dt { + font-weight: bold; +} + +p.admonition-title { + margin: 0px 10px 5px 0px; + font-weight: bold; +} + +div.body p.centered { + text-align: center; + margin-top: 25px; +} + +/* -- content of sidebars/topics/admonitions -------------------------------- */ + +div.sidebar > :last-child, +aside.sidebar > :last-child, +nav.contents > :last-child, +aside.topic > :last-child, +div.topic > :last-child, +div.admonition > :last-child { + margin-bottom: 0; +} + +div.sidebar::after, +aside.sidebar::after, +nav.contents::after, +aside.topic::after, +div.topic::after, +div.admonition::after, +blockquote::after { + display: block; + content: ''; + clear: both; +} + +/* -- tables ---------------------------------------------------------------- */ + +table.docutils { + margin-top: 10px; + margin-bottom: 10px; + border: 0; + border-collapse: collapse; +} + +table.align-center { + margin-left: auto; + margin-right: auto; +} + +table.align-default { + margin-left: auto; + margin-right: auto; +} + +table caption span.caption-number { + font-style: italic; +} + +table caption span.caption-text { +} + +table.docutils td, table.docutils th { + padding: 1px 8px 1px 5px; + border-top: 0; + border-left: 0; + border-right: 0; + border-bottom: 1px solid #aaa; +} + +th { + text-align: left; + padding-right: 5px; +} + +table.citation { + border-left: solid 1px gray; + margin-left: 1px; +} + +table.citation td { + border-bottom: none; +} + +th > :first-child, +td > :first-child { + margin-top: 0px; +} + +th > :last-child, +td > :last-child { + margin-bottom: 0px; +} + +/* -- figures --------------------------------------------------------------- */ + +div.figure, figure { + margin: 0.5em; + padding: 0.5em; +} + +div.figure p.caption, figcaption { + padding: 0.3em; +} + +div.figure p.caption span.caption-number, +figcaption span.caption-number { + font-style: italic; +} + +div.figure p.caption span.caption-text, +figcaption span.caption-text { +} + +/* -- field list styles ----------------------------------------------------- */ + +table.field-list td, table.field-list th { + border: 0 !important; +} + +.field-list ul { + margin: 0; + padding-left: 1em; +} + +.field-list p { + margin: 0; +} + +.field-name { + -moz-hyphens: manual; + -ms-hyphens: manual; + -webkit-hyphens: manual; + hyphens: manual; +} + +/* -- hlist styles ---------------------------------------------------------- */ + +table.hlist { + margin: 1em 0; +} + +table.hlist td { + vertical-align: top; +} + +/* -- object description styles --------------------------------------------- */ + +.sig { + font-family: 'Consolas', 'Menlo', 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', monospace; +} + +.sig-name, code.descname { + background-color: transparent; + font-weight: bold; +} + +.sig-name { + font-size: 1.1em; +} + +code.descname { + font-size: 1.2em; +} + +.sig-prename, code.descclassname { + background-color: transparent; +} + +.optional { + font-size: 1.3em; +} + +.sig-paren { + font-size: larger; +} + +.sig-param.n { + font-style: italic; +} + +/* C++ specific styling */ + +.sig-inline.c-texpr, +.sig-inline.cpp-texpr { + font-family: unset; +} + +.sig.c .k, .sig.c .kt, +.sig.cpp .k, .sig.cpp .kt { + color: #0033B3; +} + +.sig.c .m, +.sig.cpp .m { + color: #1750EB; +} + +.sig.c .s, .sig.c .sc, +.sig.cpp .s, .sig.cpp .sc { + color: #067D17; +} + + +/* -- other body styles ----------------------------------------------------- */ + +ol.arabic { + list-style: decimal; +} + +ol.loweralpha { + list-style: lower-alpha; +} + 
+ol.upperalpha { + list-style: upper-alpha; +} + +ol.lowerroman { + list-style: lower-roman; +} + +ol.upperroman { + list-style: upper-roman; +} + +:not(li) > ol > li:first-child > :first-child, +:not(li) > ul > li:first-child > :first-child { + margin-top: 0px; +} + +:not(li) > ol > li:last-child > :last-child, +:not(li) > ul > li:last-child > :last-child { + margin-bottom: 0px; +} + +ol.simple ol p, +ol.simple ul p, +ul.simple ol p, +ul.simple ul p { + margin-top: 0; +} + +ol.simple > li:not(:first-child) > p, +ul.simple > li:not(:first-child) > p { + margin-top: 0; +} + +ol.simple p, +ul.simple p { + margin-bottom: 0; +} + +aside.footnote > span, +div.citation > span { + float: left; +} +aside.footnote > span:last-of-type, +div.citation > span:last-of-type { + padding-right: 0.5em; +} +aside.footnote > p { + margin-left: 2em; +} +div.citation > p { + margin-left: 4em; +} +aside.footnote > p:last-of-type, +div.citation > p:last-of-type { + margin-bottom: 0em; +} +aside.footnote > p:last-of-type:after, +div.citation > p:last-of-type:after { + content: ""; + clear: both; +} + +dl.field-list { + display: grid; + grid-template-columns: fit-content(30%) auto; +} + +dl.field-list > dt { + font-weight: bold; + word-break: break-word; + padding-left: 0.5em; + padding-right: 5px; +} + +dl.field-list > dd { + padding-left: 0.5em; + margin-top: 0em; + margin-left: 0em; + margin-bottom: 0em; +} + +dl { + margin-bottom: 15px; +} + +dd > :first-child { + margin-top: 0px; +} + +dd ul, dd table { + margin-bottom: 10px; +} + +dd { + margin-top: 3px; + margin-bottom: 10px; + margin-left: 30px; +} + +.sig dd { + margin-top: 0px; + margin-bottom: 0px; +} + +.sig dl { + margin-top: 0px; + margin-bottom: 0px; +} + +dl > dd:last-child, +dl > dd:last-child > :last-child { + margin-bottom: 0; +} + +dt:target, span.highlighted { + background-color: #fbe54e; +} + +rect.highlighted { + fill: #fbe54e; +} + +dl.glossary dt { + font-weight: bold; + font-size: 1.1em; +} + +.versionmodified { + font-style: italic; +} + +.system-message { + background-color: #fda; + padding: 5px; + border: 3px solid red; +} + +.footnote:target { + background-color: #ffa; +} + +.line-block { + display: block; + margin-top: 1em; + margin-bottom: 1em; +} + +.line-block .line-block { + margin-top: 0; + margin-bottom: 0; + margin-left: 1.5em; +} + +.guilabel, .menuselection { + font-family: sans-serif; +} + +.accelerator { + text-decoration: underline; +} + +.classifier { + font-style: oblique; +} + +.classifier:before { + font-style: normal; + margin: 0 0.5em; + content: ":"; + display: inline-block; +} + +abbr, acronym { + border-bottom: dotted 1px; + cursor: help; +} + +/* -- code displays --------------------------------------------------------- */ + +pre { + overflow: auto; + overflow-y: hidden; /* fixes display issues on Chrome browsers */ +} + +pre, div[class*="highlight-"] { + clear: both; +} + +span.pre { + -moz-hyphens: none; + -ms-hyphens: none; + -webkit-hyphens: none; + hyphens: none; + white-space: nowrap; +} + +div[class*="highlight-"] { + margin: 1em 0; +} + +td.linenos pre { + border: 0; + background-color: transparent; + color: #aaa; +} + +table.highlighttable { + display: block; +} + +table.highlighttable tbody { + display: block; +} + +table.highlighttable tr { + display: flex; +} + +table.highlighttable td { + margin: 0; + padding: 0; +} + +table.highlighttable td.linenos { + padding-right: 0.5em; +} + +table.highlighttable td.code { + flex: 1; + overflow: hidden; +} + +.highlight .hll { + display: block; +} + 
+div.highlight pre, +table.highlighttable pre { + margin: 0; +} + +div.code-block-caption + div { + margin-top: 0; +} + +div.code-block-caption { + margin-top: 1em; + padding: 2px 5px; + font-size: small; +} + +div.code-block-caption code { + background-color: transparent; +} + +table.highlighttable td.linenos, +span.linenos, +div.highlight span.gp { /* gp: Generic.Prompt */ + user-select: none; + -webkit-user-select: text; /* Safari fallback only */ + -webkit-user-select: none; /* Chrome/Safari */ + -moz-user-select: none; /* Firefox */ + -ms-user-select: none; /* IE10+ */ +} + +div.code-block-caption span.caption-number { + padding: 0.1em 0.3em; + font-style: italic; +} + +div.code-block-caption span.caption-text { +} + +div.literal-block-wrapper { + margin: 1em 0; +} + +code.xref, a code { + background-color: transparent; + font-weight: bold; +} + +h1 code, h2 code, h3 code, h4 code, h5 code, h6 code { + background-color: transparent; +} + +.viewcode-link { + float: right; +} + +.viewcode-back { + float: right; + font-family: sans-serif; +} + +div.viewcode-block:target { + margin: -1px -10px; + padding: 0 10px; +} + +/* -- math display ---------------------------------------------------------- */ + +img.math { + vertical-align: middle; +} + +div.body div.math p { + text-align: center; +} + +span.eqno { + float: right; +} + +span.eqno a.headerlink { + position: absolute; + z-index: 1; +} + +div.math:hover a.headerlink { + visibility: visible; +} + +/* -- printout stylesheet --------------------------------------------------- */ + +@media print { + div.document, + div.documentwrapper, + div.bodywrapper { + margin: 0 !important; + width: 100%; + } + + div.sphinxsidebar, + div.related, + div.footer, + #top-link { + display: none; + } +} \ No newline at end of file diff --git a/docs/html/_static/custom.css b/docs/html/_static/custom.css new file mode 100644 index 0000000..2a924f1 --- /dev/null +++ b/docs/html/_static/custom.css @@ -0,0 +1 @@ +/* This file intentionally left blank. */ diff --git a/docs/html/_static/doctools.js b/docs/html/_static/doctools.js new file mode 100644 index 0000000..0398ebb --- /dev/null +++ b/docs/html/_static/doctools.js @@ -0,0 +1,149 @@ +/* + * Base JavaScript utilities for all Sphinx HTML documentation. + */ +"use strict"; + +const BLACKLISTED_KEY_CONTROL_ELEMENTS = new Set([ + "TEXTAREA", + "INPUT", + "SELECT", + "BUTTON", +]); + +const _ready = (callback) => { + if (document.readyState !== "loading") { + callback(); + } else { + document.addEventListener("DOMContentLoaded", callback); + } +}; + +/** + * Small JavaScript module for the documentation. + */ +const Documentation = { + init: () => { + Documentation.initDomainIndexTable(); + Documentation.initOnKeyListeners(); + }, + + /** + * i18n support + */ + TRANSLATIONS: {}, + PLURAL_EXPR: (n) => (n === 1 ? 0 : 1), + LOCALE: "unknown", + + // gettext and ngettext don't access this so that the functions + // can safely bound to a different name (_ = Documentation.gettext) + gettext: (string) => { + const translated = Documentation.TRANSLATIONS[string]; + switch (typeof translated) { + case "undefined": + return string; // no translation + case "string": + return translated; // translation exists + default: + return translated[0]; // (singular, plural) translation tuple exists + } + }, + + ngettext: (singular, plural, n) => { + const translated = Documentation.TRANSLATIONS[singular]; + if (typeof translated !== "undefined") + return translated[Documentation.PLURAL_EXPR(n)]; + return n === 1 ? 
singular : plural; + }, + + addTranslations: (catalog) => { + Object.assign(Documentation.TRANSLATIONS, catalog.messages); + Documentation.PLURAL_EXPR = new Function( + "n", + `return (${catalog.plural_expr})` + ); + Documentation.LOCALE = catalog.locale; + }, + + /** + * helper function to focus on search bar + */ + focusSearchBar: () => { + document.querySelectorAll("input[name=q]")[0]?.focus(); + }, + + /** + * Initialise the domain index toggle buttons + */ + initDomainIndexTable: () => { + const toggler = (el) => { + const idNumber = el.id.substr(7); + const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`); + if (el.src.substr(-9) === "minus.png") { + el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`; + toggledRows.forEach((el) => (el.style.display = "none")); + } else { + el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`; + toggledRows.forEach((el) => (el.style.display = "")); + } + }; + + const togglerElements = document.querySelectorAll("img.toggler"); + togglerElements.forEach((el) => + el.addEventListener("click", (event) => toggler(event.currentTarget)) + ); + togglerElements.forEach((el) => (el.style.display = "")); + if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler); + }, + + initOnKeyListeners: () => { + // only install a listener if it is really needed + if ( + !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS && + !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS + ) + return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.altKey || event.ctrlKey || event.metaKey) return; + + if (!event.shiftKey) { + switch (event.key) { + case "ArrowLeft": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const prevLink = document.querySelector('link[rel="prev"]'); + if (prevLink && prevLink.href) { + window.location.href = prevLink.href; + event.preventDefault(); + } + break; + case "ArrowRight": + if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break; + + const nextLink = document.querySelector('link[rel="next"]'); + if (nextLink && nextLink.href) { + window.location.href = nextLink.href; + event.preventDefault(); + } + break; + } + } + + // some keyboard layouts may need Shift to get / + switch (event.key) { + case "/": + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break; + Documentation.focusSearchBar(); + event.preventDefault(); + } + }); + }, +}; + +// quick alias for translations +const _ = Documentation.gettext; + +_ready(Documentation.init); diff --git a/docs/html/_static/documentation_options.js b/docs/html/_static/documentation_options.js new file mode 100644 index 0000000..57fc22d --- /dev/null +++ b/docs/html/_static/documentation_options.js @@ -0,0 +1,13 @@ +const DOCUMENTATION_OPTIONS = { + VERSION: '0.1', + LANGUAGE: 'zh', + COLLAPSE_INDEX: false, + BUILDER: 'html', + FILE_SUFFIX: '.html', + LINK_SUFFIX: '.html', + HAS_SOURCE: true, + SOURCELINK_SUFFIX: '.txt', + NAVIGATION_WITH_KEYS: false, + SHOW_SEARCH_SUMMARY: true, + ENABLE_SEARCH_SHORTCUTS: true, +}; \ No newline at end of file diff --git a/docs/html/_static/file.png b/docs/html/_static/file.png new file mode 100644 index 0000000..a858a41 Binary files /dev/null and b/docs/html/_static/file.png differ diff --git a/docs/html/_static/github-banner.svg b/docs/html/_static/github-banner.svg new file mode 100644 index 0000000..c47d9dc --- /dev/null +++ b/docs/html/_static/github-banner.svg @@ -0,0 +1,5 
@@ + + + + + diff --git a/docs/html/_static/language_data.js b/docs/html/_static/language_data.js new file mode 100644 index 0000000..c7fe6c6 --- /dev/null +++ b/docs/html/_static/language_data.js @@ -0,0 +1,192 @@ +/* + * This script contains the language-specific data used by searchtools.js, + * namely the list of stopwords, stemmer, scorer and splitter. + */ + +var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"]; + + +/* Non-minified version is copied as a separate JS file, if available */ + +/** + * Porter Stemmer + */ +var Stemmer = function() { + + var step2list = { + ational: 'ate', + tional: 'tion', + enci: 'ence', + anci: 'ance', + izer: 'ize', + bli: 'ble', + alli: 'al', + entli: 'ent', + eli: 'e', + ousli: 'ous', + ization: 'ize', + ation: 'ate', + ator: 'ate', + alism: 'al', + iveness: 'ive', + fulness: 'ful', + ousness: 'ous', + aliti: 'al', + iviti: 'ive', + biliti: 'ble', + logi: 'log' + }; + + var step3list = { + icate: 'ic', + ative: '', + alize: 'al', + iciti: 'ic', + ical: 'ic', + ful: '', + ness: '' + }; + + var c = "[^aeiou]"; // consonant + var v = "[aeiouy]"; // vowel + var C = c + "[^aeiouy]*"; // consonant sequence + var V = v + "[aeiou]*"; // vowel sequence + + var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0 + var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1 + var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1 + var s_v = "^(" + C + ")?" + v; // vowel in stem + + this.stemWord = function (w) { + var stem; + var suffix; + var firstch; + var origword = w; + + if (w.length < 3) + return w; + + var re; + var re2; + var re3; + var re4; + + firstch = w.substr(0,1); + if (firstch == "y") + w = firstch.toUpperCase() + w.substr(1); + + // Step 1a + re = /^(.+?)(ss|i)es$/; + re2 = /^(.+?)([^s])s$/; + + if (re.test(w)) + w = w.replace(re,"$1$2"); + else if (re2.test(w)) + w = w.replace(re2,"$1$2"); + + // Step 1b + re = /^(.+?)eed$/; + re2 = /^(.+?)(ed|ing)$/; + if (re.test(w)) { + var fp = re.exec(w); + re = new RegExp(mgr0); + if (re.test(fp[1])) { + re = /.$/; + w = w.replace(re,""); + } + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1]; + re2 = new RegExp(s_v); + if (re2.test(stem)) { + w = stem; + re2 = /(at|bl|iz)$/; + re3 = new RegExp("([^aeiouylsz])\\1$"); + re4 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re2.test(w)) + w = w + "e"; + else if (re3.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + else if (re4.test(w)) + w = w + "e"; + } + } + + // Step 1c + re = /^(.+?)y$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(s_v); + if (re.test(stem)) + w = stem + "i"; + } + + // Step 2 + re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step2list[suffix]; + } + + // Step 3 + re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + suffix = fp[2]; + re = new RegExp(mgr0); + if (re.test(stem)) + w = stem + step3list[suffix]; + } + + // Step 4 + re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/; + re2 = /^(.+?)(s|t)(ion)$/; + if (re.test(w)) { + var fp = 
re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + if (re.test(stem)) + w = stem; + } + else if (re2.test(w)) { + var fp = re2.exec(w); + stem = fp[1] + fp[2]; + re2 = new RegExp(mgr1); + if (re2.test(stem)) + w = stem; + } + + // Step 5 + re = /^(.+?)e$/; + if (re.test(w)) { + var fp = re.exec(w); + stem = fp[1]; + re = new RegExp(mgr1); + re2 = new RegExp(meq1); + re3 = new RegExp("^" + C + v + "[^aeiouwxy]$"); + if (re.test(stem) || (re2.test(stem) && !(re3.test(stem)))) + w = stem; + } + re = /ll$/; + re2 = new RegExp(mgr1); + if (re.test(w) && re2.test(w)) { + re = /.$/; + w = w.replace(re,""); + } + + // and turn initial Y back to y + if (firstch == "y") + w = firstch.toLowerCase() + w.substr(1); + return w; + } +} + diff --git a/docs/html/_static/minus.png b/docs/html/_static/minus.png new file mode 100644 index 0000000..d96755f Binary files /dev/null and b/docs/html/_static/minus.png differ diff --git a/docs/html/_static/plus.png b/docs/html/_static/plus.png new file mode 100644 index 0000000..7107cec Binary files /dev/null and b/docs/html/_static/plus.png differ diff --git a/docs/html/_static/pygments.css b/docs/html/_static/pygments.css new file mode 100644 index 0000000..9392ddc --- /dev/null +++ b/docs/html/_static/pygments.css @@ -0,0 +1,84 @@ +pre { line-height: 125%; } +td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; } +td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; } +.highlight .hll { background-color: #ffffcc } +.highlight { background: #f8f8f8; } +.highlight .c { color: #8F5902; font-style: italic } /* Comment */ +.highlight .err { color: #A40000; border: 1px solid #EF2929 } /* Error */ +.highlight .g { color: #000 } /* Generic */ +.highlight .k { color: #004461; font-weight: bold } /* Keyword */ +.highlight .l { color: #000 } /* Literal */ +.highlight .n { color: #000 } /* Name */ +.highlight .o { color: #582800 } /* Operator */ +.highlight .x { color: #000 } /* Other */ +.highlight .p { color: #000; font-weight: bold } /* Punctuation */ +.highlight .ch { color: #8F5902; font-style: italic } /* Comment.Hashbang */ +.highlight .cm { color: #8F5902; font-style: italic } /* Comment.Multiline */ +.highlight .cp { color: #8F5902 } /* Comment.Preproc */ +.highlight .cpf { color: #8F5902; font-style: italic } /* Comment.PreprocFile */ +.highlight .c1 { color: #8F5902; font-style: italic } /* Comment.Single */ +.highlight .cs { color: #8F5902; font-style: italic } /* Comment.Special */ +.highlight .gd { color: #A40000 } /* Generic.Deleted */ +.highlight .ge { color: #000; font-style: italic } /* Generic.Emph */ +.highlight .ges { color: #000 } /* Generic.EmphStrong */ +.highlight .gr { color: #EF2929 } /* Generic.Error */ +.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */ +.highlight .gi { color: #00A000 } /* Generic.Inserted */ +.highlight .go { color: #888 } /* Generic.Output */ +.highlight .gp { color: #745334 } /* Generic.Prompt */ +.highlight .gs { color: #000; font-weight: bold } /* Generic.Strong */ +.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */ +.highlight .gt { color: #A40000; font-weight: bold } /* Generic.Traceback */ +.highlight .kc { color: #004461; font-weight: bold } /* 
Keyword.Constant */ +.highlight .kd { color: #004461; font-weight: bold } /* Keyword.Declaration */ +.highlight .kn { color: #004461; font-weight: bold } /* Keyword.Namespace */ +.highlight .kp { color: #004461; font-weight: bold } /* Keyword.Pseudo */ +.highlight .kr { color: #004461; font-weight: bold } /* Keyword.Reserved */ +.highlight .kt { color: #004461; font-weight: bold } /* Keyword.Type */ +.highlight .ld { color: #000 } /* Literal.Date */ +.highlight .m { color: #900 } /* Literal.Number */ +.highlight .s { color: #4E9A06 } /* Literal.String */ +.highlight .na { color: #C4A000 } /* Name.Attribute */ +.highlight .nb { color: #004461 } /* Name.Builtin */ +.highlight .nc { color: #000 } /* Name.Class */ +.highlight .no { color: #000 } /* Name.Constant */ +.highlight .nd { color: #888 } /* Name.Decorator */ +.highlight .ni { color: #CE5C00 } /* Name.Entity */ +.highlight .ne { color: #C00; font-weight: bold } /* Name.Exception */ +.highlight .nf { color: #000 } /* Name.Function */ +.highlight .nl { color: #F57900 } /* Name.Label */ +.highlight .nn { color: #000 } /* Name.Namespace */ +.highlight .nx { color: #000 } /* Name.Other */ +.highlight .py { color: #000 } /* Name.Property */ +.highlight .nt { color: #004461; font-weight: bold } /* Name.Tag */ +.highlight .nv { color: #000 } /* Name.Variable */ +.highlight .ow { color: #004461; font-weight: bold } /* Operator.Word */ +.highlight .pm { color: #000; font-weight: bold } /* Punctuation.Marker */ +.highlight .w { color: #F8F8F8 } /* Text.Whitespace */ +.highlight .mb { color: #900 } /* Literal.Number.Bin */ +.highlight .mf { color: #900 } /* Literal.Number.Float */ +.highlight .mh { color: #900 } /* Literal.Number.Hex */ +.highlight .mi { color: #900 } /* Literal.Number.Integer */ +.highlight .mo { color: #900 } /* Literal.Number.Oct */ +.highlight .sa { color: #4E9A06 } /* Literal.String.Affix */ +.highlight .sb { color: #4E9A06 } /* Literal.String.Backtick */ +.highlight .sc { color: #4E9A06 } /* Literal.String.Char */ +.highlight .dl { color: #4E9A06 } /* Literal.String.Delimiter */ +.highlight .sd { color: #8F5902; font-style: italic } /* Literal.String.Doc */ +.highlight .s2 { color: #4E9A06 } /* Literal.String.Double */ +.highlight .se { color: #4E9A06 } /* Literal.String.Escape */ +.highlight .sh { color: #4E9A06 } /* Literal.String.Heredoc */ +.highlight .si { color: #4E9A06 } /* Literal.String.Interpol */ +.highlight .sx { color: #4E9A06 } /* Literal.String.Other */ +.highlight .sr { color: #4E9A06 } /* Literal.String.Regex */ +.highlight .s1 { color: #4E9A06 } /* Literal.String.Single */ +.highlight .ss { color: #4E9A06 } /* Literal.String.Symbol */ +.highlight .bp { color: #3465A4 } /* Name.Builtin.Pseudo */ +.highlight .fm { color: #000 } /* Name.Function.Magic */ +.highlight .vc { color: #000 } /* Name.Variable.Class */ +.highlight .vg { color: #000 } /* Name.Variable.Global */ +.highlight .vi { color: #000 } /* Name.Variable.Instance */ +.highlight .vm { color: #000 } /* Name.Variable.Magic */ +.highlight .il { color: #900 } /* Literal.Number.Integer.Long */ \ No newline at end of file diff --git a/docs/html/_static/searchtools.js b/docs/html/_static/searchtools.js new file mode 100644 index 0000000..91f4be5 --- /dev/null +++ b/docs/html/_static/searchtools.js @@ -0,0 +1,635 @@ +/* + * Sphinx JavaScript utilities for the full-text search. + */ +"use strict"; + +/** + * Simple result scoring code. 
+ */ +if (typeof Scorer === "undefined") { + var Scorer = { + // Implement the following function to further tweak the score for each result + // The function takes a result array [docname, title, anchor, descr, score, filename] + // and returns the new score. + /* + score: result => { + const [docname, title, anchor, descr, score, filename, kind] = result + return score + }, + */ + + // query matches the full name of an object + objNameMatch: 11, + // or matches in the last dotted part of the object name + objPartialMatch: 6, + // Additive scores depending on the priority of the object + objPrio: { + 0: 15, // used to be importantResults + 1: 5, // used to be objectResults + 2: -5, // used to be unimportantResults + }, + // Used when the priority is not in the mapping. + objPrioDefault: 0, + + // query found in title + title: 15, + partialTitle: 7, + // query found in terms + term: 5, + partialTerm: 2, + }; +} + +// Global search result kind enum, used by themes to style search results. +class SearchResultKind { + static get index() { return "index"; } + static get object() { return "object"; } + static get text() { return "text"; } + static get title() { return "title"; } +} + +const _removeChildren = (element) => { + while (element && element.lastChild) element.removeChild(element.lastChild); +}; + +/** + * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping + */ +const _escapeRegExp = (string) => + string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string + +const _displayItem = (item, searchTerms, highlightTerms) => { + const docBuilder = DOCUMENTATION_OPTIONS.BUILDER; + const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX; + const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX; + const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY; + const contentRoot = document.documentElement.dataset.content_root; + + const [docName, title, anchor, descr, score, _filename, kind] = item; + + let listItem = document.createElement("li"); + // Add a class representing the item's type: + // can be used by a theme's CSS selector for styling + // See SearchResultKind for the class names. 
+ listItem.classList.add(`kind-${kind}`); + let requestUrl; + let linkUrl; + if (docBuilder === "dirhtml") { + // dirhtml builder + let dirname = docName + "/"; + if (dirname.match(/\/index\/$/)) + dirname = dirname.substring(0, dirname.length - 6); + else if (dirname === "index/") dirname = ""; + requestUrl = contentRoot + dirname; + linkUrl = requestUrl; + } else { + // normal html builders + requestUrl = contentRoot + docName + docFileSuffix; + linkUrl = docName + docLinkSuffix; + } + let linkEl = listItem.appendChild(document.createElement("a")); + linkEl.href = linkUrl + anchor; + linkEl.dataset.score = score; + linkEl.innerHTML = title; + if (descr) { + listItem.appendChild(document.createElement("span")).innerHTML = + " (" + descr + ")"; + // highlight search terms in the description + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + } + else if (showSearchSummary) + fetch(requestUrl) + .then((responseData) => responseData.text()) + .then((data) => { + if (data) + listItem.appendChild( + Search.makeSearchSummary(data, searchTerms, anchor) + ); + // highlight search terms in the summary + if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js + highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted")); + }); + Search.output.appendChild(listItem); +}; +const _finishSearch = (resultCount) => { + Search.stopPulse(); + Search.title.innerText = _("Search Results"); + if (!resultCount) + Search.status.innerText = Documentation.gettext( + "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories." + ); + else + Search.status.innerText = Documentation.ngettext( + "Search finished, found one page matching the search query.", + "Search finished, found ${resultCount} pages matching the search query.", + resultCount, + ).replace('${resultCount}', resultCount); +}; +const _displayNextItem = ( + results, + resultCount, + searchTerms, + highlightTerms, +) => { + // results left, load the summary and display it + // this is intended to be dynamic (don't sub resultsCount) + if (results.length) { + _displayItem(results.pop(), searchTerms, highlightTerms); + setTimeout( + () => _displayNextItem(results, resultCount, searchTerms, highlightTerms), + 5 + ); + } + // search finished, update title and status message + else _finishSearch(resultCount); +}; +// Helper function used by query() to order search results. +// Each input is an array of [docname, title, anchor, descr, score, filename, kind]. +// Order the results by score (in opposite order of appearance, since the +// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically. +const _orderResultsByScoreThenName = (a, b) => { + const leftScore = a[4]; + const rightScore = b[4]; + if (leftScore === rightScore) { + // same score: sort alphabetically + const leftTitle = a[1].toLowerCase(); + const rightTitle = b[1].toLowerCase(); + if (leftTitle === rightTitle) return 0; + return leftTitle > rightTitle ? -1 : 1; // inverted is intentional + } + return leftScore > rightScore ? 1 : -1; +}; + +/** + * Default splitQuery function. Can be overridden in ``sphinx.search`` with a + * custom function per language. + * + * The regular expression works by splitting the string on consecutive characters + * that are not Unicode letters, numbers, underscores, or emoji characters. 
+ * This is the same as ``\W+`` in Python, preserving the surrogate pair area. + */ +if (typeof splitQuery === "undefined") { + var splitQuery = (query) => query + .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu) + .filter(term => term) // remove remaining empty strings +} + +/** + * Search Module + */ +const Search = { + _index: null, + _queued_query: null, + _pulse_status: -1, + + htmlToText: (htmlString, anchor) => { + const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html'); + for (const removalQuery of [".headerlink", "script", "style"]) { + htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() }); + } + if (anchor) { + const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`); + if (anchorContent) return anchorContent.textContent; + + console.warn( + `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.` + ); + } + + // if anchor not specified or not found, fall back to main content + const docContent = htmlElement.querySelector('[role="main"]'); + if (docContent) return docContent.textContent; + + console.warn( + "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template." + ); + return ""; + }, + + init: () => { + const query = new URLSearchParams(window.location.search).get("q"); + document + .querySelectorAll('input[name="q"]') + .forEach((el) => (el.value = query)); + if (query) Search.performSearch(query); + }, + + loadIndex: (url) => + (document.body.appendChild(document.createElement("script")).src = url), + + setIndex: (index) => { + Search._index = index; + if (Search._queued_query !== null) { + const query = Search._queued_query; + Search._queued_query = null; + Search.query(query); + } + }, + + hasIndex: () => Search._index !== null, + + deferQuery: (query) => (Search._queued_query = query), + + stopPulse: () => (Search._pulse_status = -1), + + startPulse: () => { + if (Search._pulse_status >= 0) return; + + const pulse = () => { + Search._pulse_status = (Search._pulse_status + 1) % 4; + Search.dots.innerText = ".".repeat(Search._pulse_status); + if (Search._pulse_status >= 0) window.setTimeout(pulse, 500); + }; + pulse(); + }, + + /** + * perform a search for something (or wait until index is loaded) + */ + performSearch: (query) => { + // create the required interface elements + const searchText = document.createElement("h2"); + searchText.textContent = _("Searching"); + const searchSummary = document.createElement("p"); + searchSummary.classList.add("search-summary"); + searchSummary.innerText = ""; + const searchList = document.createElement("ul"); + searchList.setAttribute("role", "list"); + searchList.classList.add("search"); + + const out = document.getElementById("search-results"); + Search.title = out.appendChild(searchText); + Search.dots = Search.title.appendChild(document.createElement("span")); + Search.status = out.appendChild(searchSummary); + Search.output = out.appendChild(searchList); + + const searchProgress = document.getElementById("search-progress"); + // Some themes don't use the search progress node + if (searchProgress) { + searchProgress.innerText = _("Preparing search..."); + } + Search.startPulse(); + + // index already loaded, the browser was quick! 
+ if (Search.hasIndex()) Search.query(query); + else Search.deferQuery(query); + }, + + _parseQuery: (query) => { + // stem the search terms and add them to the correct list + const stemmer = new Stemmer(); + const searchTerms = new Set(); + const excludedTerms = new Set(); + const highlightTerms = new Set(); + const objectTerms = new Set(splitQuery(query.toLowerCase().trim())); + splitQuery(query.trim()).forEach((queryTerm) => { + const queryTermLower = queryTerm.toLowerCase(); + + // maybe skip this "word" + // stopwords array is from language_data.js + if ( + stopwords.indexOf(queryTermLower) !== -1 || + queryTerm.match(/^\d+$/) + ) + return; + + // stem the word + let word = stemmer.stemWord(queryTermLower); + // select the correct list + if (word[0] === "-") excludedTerms.add(word.substr(1)); + else { + searchTerms.add(word); + highlightTerms.add(queryTermLower); + } + }); + + if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js + localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" ")) + } + + // console.debug("SEARCH: searching for:"); + // console.info("required: ", [...searchTerms]); + // console.info("excluded: ", [...excludedTerms]); + + return [query, searchTerms, excludedTerms, highlightTerms, objectTerms]; + }, + + /** + * execute search (requires search index to be loaded) + */ + _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + const allTitles = Search._index.alltitles; + const indexEntries = Search._index.indexentries; + + // Collect multiple result groups to be sorted separately and then ordered. + // Each is an array of [docname, title, anchor, descr, score, filename, kind]. + const normalResults = []; + const nonMainIndexResults = []; + + _removeChildren(document.getElementById("search-progress")); + + const queryLower = query.toLowerCase().trim(); + for (const [title, foundTitles] of Object.entries(allTitles)) { + if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) { + for (const [file, id] of foundTitles) { + const score = Math.round(Scorer.title * queryLower.length / title.length); + const boost = titles[file] === title ? 1 : 0; // add a boost for document titles + normalResults.push([ + docNames[file], + titles[file] !== title ? `${titles[file]} > ${title}` : title, + id !== null ? "#" + id : "", + null, + score + boost, + filenames[file], + SearchResultKind.title, + ]); + } + } + } + + // search for explicit entries in index directives + for (const [entry, foundEntries] of Object.entries(indexEntries)) { + if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) { + for (const [file, id, isMain] of foundEntries) { + const score = Math.round(100 * queryLower.length / entry.length); + const result = [ + docNames[file], + titles[file], + id ? 
"#" + id : "", + null, + score, + filenames[file], + SearchResultKind.index, + ]; + if (isMain) { + normalResults.push(result); + } else { + nonMainIndexResults.push(result); + } + } + } + } + + // lookup as object + objectTerms.forEach((term) => + normalResults.push(...Search.performObjectSearch(term, objectTerms)) + ); + + // lookup as search terms in fulltext + normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms)); + + // let the scorer override scores with a custom scoring function + if (Scorer.score) { + normalResults.forEach((item) => (item[4] = Scorer.score(item))); + nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item))); + } + + // Sort each group of results by score and then alphabetically by name. + normalResults.sort(_orderResultsByScoreThenName); + nonMainIndexResults.sort(_orderResultsByScoreThenName); + + // Combine the result groups in (reverse) order. + // Non-main index entries are typically arbitrary cross-references, + // so display them after other results. + let results = [...nonMainIndexResults, ...normalResults]; + + // remove duplicate search results + // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept + let seen = new Set(); + results = results.reverse().reduce((acc, result) => { + let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(','); + if (!seen.has(resultStr)) { + acc.push(result); + seen.add(resultStr); + } + return acc; + }, []); + + return results.reverse(); + }, + + query: (query) => { + const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query); + const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms); + + // for debugging + //Search.lastresults = results.slice(); // a copy + // console.info("search results:", Search.lastresults); + + // print the results + _displayNextItem(results, results.length, searchTerms, highlightTerms); + }, + + /** + * search for object names + */ + performObjectSearch: (object, objectTerms) => { + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const objects = Search._index.objects; + const objNames = Search._index.objnames; + const titles = Search._index.titles; + + const results = []; + + const objectSearchCallback = (prefix, match) => { + const name = match[4] + const fullname = (prefix ? prefix + "." : "") + name; + const fullnameLower = fullname.toLowerCase(); + if (fullnameLower.indexOf(object) < 0) return; + + let score = 0; + const parts = fullnameLower.split("."); + + // check for different match types: exact matches of full name or + // "last name" (i.e. 
last dotted part) + if (fullnameLower === object || parts.slice(-1)[0] === object) + score += Scorer.objNameMatch; + else if (parts.slice(-1)[0].indexOf(object) > -1) + score += Scorer.objPartialMatch; // matches in last name + + const objName = objNames[match[1]][2]; + const title = titles[match[0]]; + + // If more than one term searched for, we require other words to be + // found in the name/title/description + const otherTerms = new Set(objectTerms); + otherTerms.delete(object); + if (otherTerms.size > 0) { + const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase(); + if ( + [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0) + ) + return; + } + + let anchor = match[3]; + if (anchor === "") anchor = fullname; + else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname; + + const descr = objName + _(", in ") + title; + + // add custom score for some objects according to scorer + if (Scorer.objPrio.hasOwnProperty(match[2])) + score += Scorer.objPrio[match[2]]; + else score += Scorer.objPrioDefault; + + results.push([ + docNames[match[0]], + fullname, + "#" + anchor, + descr, + score, + filenames[match[0]], + SearchResultKind.object, + ]); + }; + Object.keys(objects).forEach((prefix) => + objects[prefix].forEach((array) => + objectSearchCallback(prefix, array) + ) + ); + return results; + }, + + /** + * search for full-text terms in the index + */ + performTermsSearch: (searchTerms, excludedTerms) => { + // prepare search + const terms = Search._index.terms; + const titleTerms = Search._index.titleterms; + const filenames = Search._index.filenames; + const docNames = Search._index.docnames; + const titles = Search._index.titles; + + const scoreMap = new Map(); + const fileMap = new Map(); + + // perform the search on the required terms + searchTerms.forEach((word) => { + const files = []; + // find documents, if any, containing the query word in their text/title term indices + // use Object.hasOwnProperty to avoid mismatching against prototype properties + const arr = [ + { files: terms.hasOwnProperty(word) ? terms[word] : undefined, score: Scorer.term }, + { files: titleTerms.hasOwnProperty(word) ? 
titleTerms[word] : undefined, score: Scorer.title }, + ]; + // add support for partial matches + if (word.length > 2) { + const escapedWord = _escapeRegExp(word); + if (!terms.hasOwnProperty(word)) { + Object.keys(terms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: terms[term], score: Scorer.partialTerm }); + }); + } + if (!titleTerms.hasOwnProperty(word)) { + Object.keys(titleTerms).forEach((term) => { + if (term.match(escapedWord)) + arr.push({ files: titleTerms[term], score: Scorer.partialTitle }); + }); + } + } + + // no match but word was a required one + if (arr.every((record) => record.files === undefined)) return; + + // found search word in contents + arr.forEach((record) => { + if (record.files === undefined) return; + + let recordFiles = record.files; + if (recordFiles.length === undefined) recordFiles = [recordFiles]; + files.push(...recordFiles); + + // set score for the word in each file + recordFiles.forEach((file) => { + if (!scoreMap.has(file)) scoreMap.set(file, new Map()); + const fileScores = scoreMap.get(file); + fileScores.set(word, record.score); + }); + }); + + // create the mapping + files.forEach((file) => { + if (!fileMap.has(file)) fileMap.set(file, [word]); + else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word); + }); + }); + + // now check if the files don't contain excluded terms + const results = []; + for (const [file, wordList] of fileMap) { + // check if all requirements are matched + + // as search terms with length < 3 are discarded + const filteredTermCount = [...searchTerms].filter( + (term) => term.length > 2 + ).length; + if ( + wordList.length !== searchTerms.size && + wordList.length !== filteredTermCount + ) + continue; + + // ensure that none of the excluded terms is in the search result + if ( + [...excludedTerms].some( + (term) => + terms[term] === file || + titleTerms[term] === file || + (terms[term] || []).includes(file) || + (titleTerms[term] || []).includes(file) + ) + ) + break; + + // select one (max) score for the file. + const score = Math.max(...wordList.map((w) => scoreMap.get(file).get(w))); + // add result to the result list + results.push([ + docNames[file], + titles[file], + "", + null, + score, + filenames[file], + SearchResultKind.text, + ]); + } + return results; + }, + + /** + * helper function to return a node containing the + * search summary for a given text. keywords is a list + * of stemmed words. + */ + makeSearchSummary: (htmlText, keywords, anchor) => { + const text = Search.htmlToText(htmlText, anchor); + if (text === "") return null; + + const textLower = text.toLowerCase(); + const actualStartPosition = [...keywords] + .map((k) => textLower.indexOf(k.toLowerCase())) + .filter((i) => i > -1) + .slice(-1)[0]; + const startWithContext = Math.max(actualStartPosition - 120, 0); + + const top = startWithContext === 0 ? "" : "..."; + const tail = startWithContext + 240 < text.length ? "..." : ""; + + let summary = document.createElement("p"); + summary.classList.add("context"); + summary.textContent = top + text.substr(startWithContext, 240).trim() + tail; + + return summary; + }, +}; + +_ready(Search.init); diff --git a/docs/html/_static/sphinx_highlight.js b/docs/html/_static/sphinx_highlight.js new file mode 100644 index 0000000..8a96c69 --- /dev/null +++ b/docs/html/_static/sphinx_highlight.js @@ -0,0 +1,154 @@ +/* Highlighting utilities for Sphinx HTML documentation. 
*/ +"use strict"; + +const SPHINX_HIGHLIGHT_ENABLED = true + +/** + * highlight a given string on a node by wrapping it in + * span elements with the given class name. + */ +const _highlight = (node, addItems, text, className) => { + if (node.nodeType === Node.TEXT_NODE) { + const val = node.nodeValue; + const parent = node.parentNode; + const pos = val.toLowerCase().indexOf(text); + if ( + pos >= 0 && + !parent.classList.contains(className) && + !parent.classList.contains("nohighlight") + ) { + let span; + + const closestNode = parent.closest("body, svg, foreignObject"); + const isInSVG = closestNode && closestNode.matches("svg"); + if (isInSVG) { + span = document.createElementNS("http://www.w3.org/2000/svg", "tspan"); + } else { + span = document.createElement("span"); + span.classList.add(className); + } + + span.appendChild(document.createTextNode(val.substr(pos, text.length))); + const rest = document.createTextNode(val.substr(pos + text.length)); + parent.insertBefore( + span, + parent.insertBefore( + rest, + node.nextSibling + ) + ); + node.nodeValue = val.substr(0, pos); + /* There may be more occurrences of search term in this node. So call this + * function recursively on the remaining fragment. + */ + _highlight(rest, addItems, text, className); + + if (isInSVG) { + const rect = document.createElementNS( + "http://www.w3.org/2000/svg", + "rect" + ); + const bbox = parent.getBBox(); + rect.x.baseVal.value = bbox.x; + rect.y.baseVal.value = bbox.y; + rect.width.baseVal.value = bbox.width; + rect.height.baseVal.value = bbox.height; + rect.setAttribute("class", className); + addItems.push({ parent: parent, target: rect }); + } + } + } else if (node.matches && !node.matches("button, select, textarea")) { + node.childNodes.forEach((el) => _highlight(el, addItems, text, className)); + } +}; +const _highlightText = (thisNode, text, className) => { + let addItems = []; + _highlight(thisNode, addItems, text, className); + addItems.forEach((obj) => + obj.parent.insertAdjacentElement("beforebegin", obj.target) + ); +}; + +/** + * Small JavaScript module for the documentation. + */ +const SphinxHighlight = { + + /** + * highlight the search words provided in localstorage in the text + */ + highlightSearchWords: () => { + if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight + + // get and clear terms from localstorage + const url = new URL(window.location); + const highlight = + localStorage.getItem("sphinx_highlight_terms") + || url.searchParams.get("highlight") + || ""; + localStorage.removeItem("sphinx_highlight_terms") + url.searchParams.delete("highlight"); + window.history.replaceState({}, "", url); + + // get individual terms from highlight string + const terms = highlight.toLowerCase().split(/\s+/).filter(x => x); + if (terms.length === 0) return; // nothing to do + + // There should never be more than one element matching "div.body" + const divBody = document.querySelectorAll("div.body"); + const body = divBody.length ? 
divBody[0] : document.querySelector("body"); + window.setTimeout(() => { + terms.forEach((term) => _highlightText(body, term, "highlighted")); + }, 10); + + const searchBox = document.getElementById("searchbox"); + if (searchBox === null) return; + searchBox.appendChild( + document + .createRange() + .createContextualFragment( + '" + ) + ); + }, + + /** + * helper function to hide the search marks again + */ + hideSearchWords: () => { + document + .querySelectorAll("#searchbox .highlight-link") + .forEach((el) => el.remove()); + document + .querySelectorAll("span.highlighted") + .forEach((el) => el.classList.remove("highlighted")); + localStorage.removeItem("sphinx_highlight_terms") + }, + + initEscapeListener: () => { + // only install a listener if it is really needed + if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return; + + document.addEventListener("keydown", (event) => { + // bail for input elements + if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return; + // bail with special keys + if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return; + if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) { + SphinxHighlight.hideSearchWords(); + event.preventDefault(); + } + }); + }, +}; + +_ready(() => { + /* Do not call highlightSearchWords() when we are on the search page. + * It will highlight words from the *previous* search query. + */ + if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords(); + SphinxHighlight.initEscapeListener(); +}); diff --git a/docs/html/appendix.html b/docs/html/appendix.html new file mode 100644 index 0000000..3848981 --- /dev/null +++ b/docs/html/appendix.html @@ -0,0 +1,140 @@ + + + + + + + + 附录 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/config.html b/docs/html/config.html new file mode 100644 index 0000000..ed2713a --- /dev/null +++ b/docs/html/config.html @@ -0,0 +1,136 @@ + + + + + + + + config module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

config module

+
+
+class config.Config(source_obj)[source]

Bases: AttributeCopier

Parses model parameters from the instantiated Netrans and generates a configuration file template based on pnnacc

Parameters:

Netrans (class) -- the instantiated Netrans object, containing the model information and Netrans configuration

+
+
+
+
+inputmeta_gen(*args, **kargs)
+
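A minimal usage sketch (hedged: it assumes the Netrans class from netrans_py and a model project laid out as in the examples; the import path follows this module's name):

from netrans import Netrans
from config import Config

# Wrap an instantiated Netrans, then generate the *_inputmeta.yml
# template for its model project via inputmeta_gen().
net = Netrans("examples/darknet/yolov4_tiny")
Config(net).inputmeta_gen()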
+ +
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/dump.html b/docs/html/dump.html new file mode 100644 index 0000000..4c43501 --- /dev/null +++ b/docs/html/dump.html @@ -0,0 +1,119 @@ + + + + + + + + dump module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

dump module

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/example.html b/docs/html/example.html new file mode 100644 index 0000000..8a71d25 --- /dev/null +++ b/docs/html/example.html @@ -0,0 +1,124 @@ + + + + + + + + example module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

example module

+
+
+example.main()[source]
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/export.html b/docs/html/export.html new file mode 100644 index 0000000..1580100 --- /dev/null +++ b/docs/html/export.html @@ -0,0 +1,141 @@ + + + + + + + + export module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

export module

+
+
+class export.Export(source_obj)[source]

Bases: AttributeCopier

Parses model parameters from the instantiated Netrans and exports the model's nbg file via pnnacc

Parameters:

Netrans (class) -- the instantiated Netrans object, containing the model information and Netrans configuration

+
+
+
+
+export_network(*args, **kargs)
+
+ +
+ +
+
+export.main()[source]
+
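A usage sketch along the same lines (an assumption-laden example: the model is taken to be already imported and quantized, as the CLI workflow requires before export):

from netrans import Netrans
from export import Export

net = Netrans("examples/darknet/yolov4_tiny")
Export(net).export_network()   # expected to write wksp/*/network_binary.nb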
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/file_model.html b/docs/html/file_model.html new file mode 100644 index 0000000..272aa53 --- /dev/null +++ b/docs/html/file_model.html @@ -0,0 +1,119 @@ + + + + + + + + file_model module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

file_model module

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/gen_api.html b/docs/html/gen_api.html new file mode 100644 index 0000000..b185ea6 --- /dev/null +++ b/docs/html/gen_api.html @@ -0,0 +1,249 @@ + + + + + + + + gen api html & pdf by sphinx — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

gen api html & pdf by sphinx

+

The netrans directory layout is as follows:

+
netrans/
+│
+├── docs/                # root of the Sphinx project
+│   ├── source/          # source directory
+│   │   ├── _static/     # static files (images, CSS, JS)
+│   │   ├── _templates/  # custom templates
+│   │   ├── conf.py      # configuration file
+│   │   ├── index.rst    # landing page
+│   │   └── my_module.rst # other documentation files
+│   └── build/           # build output directory (generated HTML, etc.)
+│
+└── bin/
+└── netrans_cli/
+└── netrans_py/
+
+
+
    +
  1. Run sphinx-quickstart docs/ to scaffold the project quickly.

  2. Edit docs/source/conf.py.
+
+

*.rst

+

rst (reStructuredText) files define the structure of the documentation and usually live in the source directory.

+

rst is a markup syntax similar to markdown.

+

Use the toctree directive .. toctree:: to list the other documentation files.

+
+
+

Generating Python API docs (html) with autodoc + Sphinx

+
    +
  1. Edit docs/source/conf.py:
+
# Configuration file for the Sphinx documentation builder.
+#
+# For the full list of built-in configuration values, see the documentation:
+# https://www.sphinx-doc.org/en/master/usage/configuration.html
+
+# -- Project information -----------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
+
+project = 'netrans'
+copyright = '2025, ccyh'
+author = 'xj'
+release = '0.1'
+
+# -- General configuration ---------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
+
+import os
+import sys
+
+sys.path.append('../../netrans_py/')
+sys.path.append('../../')
+
+# Sphinx extensions
+extensions = [
+    'sphinx.ext.autodoc',    # generate docs from docstrings
+    'sphinx.ext.viewcode',   # add links to the source code
+    'sphinx.ext.napoleon',   # support NumPy and Google style docstrings
+]
+
+# theme
+html_theme = 'sphinx_rtd_theme'
+
+templates_path = ['_templates']
+exclude_patterns = []
+
+language = 'zh'
+
+# -- Options for HTML output -------------------------------------------------
+# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
+
+html_theme = 'alabaster'  # note: this later assignment overrides sphinx_rtd_theme above
+html_static_path = ['_static']
+
+source_suffix = {
+    '.rst': 'restructuredtext',
+    '.md': 'markdown',
+}
+
+
+
    +
  2. Run sphinx-apidoc -o docs/source/ . to generate an rst file for every *.py under netrans_py, and add them to index.rst:
+
# index.rst
+
+
+
    +
  3. Run sphinx-build -b html docs/source docs/build.
+
+
+

Generating Python API docs (pdf) with autodoc + Sphinx

+
    +
  1. Once the html build works, run make latexpdf to generate the *.tex files. This step fails at first because Chinese text is not recognized.
+

2. Edit the netrans.tex file:

+
cd build/latex
+vim netrans.tex
+
+
+

Where the various \usepackage lines appear, add:

+
\usepackage[UTF8, fontset=ubuntu]{ctex}
+
+
+
    +
  3. Generate the pdf with xelatex (sphinx uses xelatex rather than pdflatex):
+
xelatex netrans.tex 
+
+
+
+
+

Common errors

+

Error

+
sphinx-quickstart
+Traceback (most recent call last):
+  File "/home/xj/app/miniforge3/envs/sphinx/bin/sphinx-quickstart", line 8, in <module>
+    sys.exit(main())
+  File "/home/xj/app/miniforge3/envs/sphinx/lib/python3.10/site-packages/sphinx/cmd/quickstart.py", line 721, in main
+    locale.setlocale(locale.LC_ALL, '')
+  File "/home/xj/app/miniforge3/envs/sphinx/lib/python3.10/locale.py", line 620, in setlocale
+    return _setlocale(category, locale)
+locale.Error: unsupported locale setting
+
+
+

Fix:
+export LC_ALL=en_US.UTF-8

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/genindex.html b/docs/html/genindex.html new file mode 100644 index 0000000..ae65cd4 --- /dev/null +++ b/docs/html/genindex.html @@ -0,0 +1,341 @@ + + + + + + + 索引 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Index

+ +
+ A + | C + | E + | F + | I + | M + | N + | Q + | R + | U + +
+

A

+ + +
+ +

C

+ + + +
+ +

E

+ + + +
    +
  • + example + +
  • +
  • + export + +
  • +
+ +

F

+ + +
    +
  • + file_model + +
  • +
+ +

I

+ + + +
+ +

M

+ + +
+ +

N

+ + +
    +
  • + netrans + +
  • +
+ +

Q

+ + + +
    +
  • + quantize + +
  • +
  • + quantize_hb + +
  • +
+ +

R

+ + +
+ +

U

+ + +
    +
  • + utils + +
  • +
+ + + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/import_model.html b/docs/html/import_model.html new file mode 100644 index 0000000..9a3425e --- /dev/null +++ b/docs/html/import_model.html @@ -0,0 +1,267 @@ + + + + + + + + import_model module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

import_model module

+
+
+class import_model.ImportModel(source_obj)[source]

Bases: AttributeCopier

Parses model parameters from the instantiated Netrans and imports the model via pnnacc

Parameters:

Netrans (class) -- the instantiated Netrans object, containing the model information and Netrans configuration

+
+
+
+
+import_network(*args, **kargs)
+
+ +
+ +
+
+import_model.check_status(result)[source]

Parses the outcome of an executed command

Parameters:

result (return of subprocess.run) -- the return value of subprocess.run

+
+
+
+ +
+
+import_model.import_caffe_network(name, netrans_path)[source]

Imports a Caffe model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
+ +
+
+import_model.import_darknet_network(name, netrans_path)[source]

Imports a Darknet model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
+ +
+
+import_model.import_onnx_network(name, netrans_path)[source]

Imports an ONNX model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
+ +
+
+import_model.import_pytorch_network(name, netrans_path)[source]

Imports a PyTorch model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
+ +
+
+import_model.import_tensorflow_network(name, netrans_path)[source]

Imports a TensorFlow model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
+ +
+
+import_model.import_tflite_network(name, netrans_path)[source]

Imports a TFLite model

Parameters:

  • name (str) -- model name

  • netrans_path (str) -- path to netrans

Returns:

the generated pnnacc command line, to be executed via subprocess

Return type:

cmd (str)

+
+
+
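A sketch tying these pieces together (assumptions: Netrans comes from netrans_py and the yolov5s project directory is laid out as in the quick start guide):

from netrans import Netrans
from import_model import ImportModel

net = Netrans("examples/onnx/yolov5s")
ImportModel(net).import_network()   # should produce yolov5s.json and yolov5s.data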
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/index.html b/docs/html/index.html new file mode 100644 index 0000000..2a170ff --- /dev/null +++ b/docs/html/index.html @@ -0,0 +1,145 @@ + + + + + + + + netrans documentation — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

netrans documentation

+

netrans is a model-processing toolchain for the pnna chip. It provides the command-line tool netrans_cli and the Python API netrans_py; its core function is converting model weights into the nbg (network binary graph) format (.nb suffix) that runs on the pnna chip.

+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/infer.html b/docs/html/infer.html new file mode 100644 index 0000000..b6c9fac --- /dev/null +++ b/docs/html/infer.html @@ -0,0 +1,135 @@ + + + + + + + + infer module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

infer module

+
+
+class infer.Infer(source_obj)[source]

Bases: AttributeCopier

+
+
+inference_network(*args, **kargs)
+
+ +
+ +
+
+infer.main()[source]
+
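The class mirrors the other module wrappers; a hedged sketch of its intended use (assuming an imported model and a generated inputmeta file):

from netrans import Netrans
from infer import Infer

net = Netrans("examples/onnx/yolov5s")
Infer(net).inference_network()   # assumption: runs inference over the inputs listed in dataset.txt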
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/modules.html b/docs/html/modules.html new file mode 100644 index 0000000..f2ff26f --- /dev/null +++ b/docs/html/modules.html @@ -0,0 +1,193 @@ + + + + + + + + netrans_py — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+ + +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/netrans.html b/docs/html/netrans.html new file mode 100644 index 0000000..1a3a70b --- /dev/null +++ b/docs/html/netrans.html @@ -0,0 +1,119 @@ + + + + + + + + netrans module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

netrans module

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/netrans_cli.html b/docs/html/netrans_cli.html new file mode 100644 index 0000000..570947a --- /dev/null +++ b/docs/html/netrans_cli.html @@ -0,0 +1,318 @@ + + + + + + + + netrans_cli 使用 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Using netrans_cli

+

netrans_cli is the command-line tool for model conversion with netrans. Converting a model with netrans_cli takes the following steps:

+
    +
  1. Import the model

  2. Generate and edit the preprocessing configuration file *_inputmeta.yml

  3. Quantize the model

  4. Export the model
+
+

netrans_cli scripts

+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
Script  Function  Usage
load.sh  Model import: converts the model into a format supported by Pnna  load.sh model_name
config.sh  Preprocessing template generation: creates the template, to be edited according to the model  config.sh model_name
quantize.sh  Quantization: quantizes the model and generates the quantization parameter file  quantize.sh model_name quantize_data_type
export.sh  Export: exports the quantized model as a runtime that can run on Pnna  export.sh model_name quantize_data_type

Models trained in different frameworks need different input files. All files must live in the same folder as the model, and the model file name must match the folder name.

+
+
+

load.sh: model import

+

Import a model with load.sh

+
    +
  • Usage: load.sh takes the model data folder named after the model file, for example:

    +
    load.sh lenet
    +
    +
    +

    "lenet"是文件夹名,也作为模型名和权重文件名。导入会打印相关日志信息,成功后会打印SUCESS。导入后lenet文件夹应该有"lenet.json"和"lenet.data"文件:

    +
    $ ls -lrt lenet
    +total 3396
    +-rwxr-xr-x 1 hope hope 1727201 Nov  5  2018 lenet.pb
    +-rw-r--r-- 1 hope hope     553 Nov  5  2018 0.jpg
    +-rwxr--r-- 1 hope hope       6 Apr 21 17:04 dataset.txt
    +-rw-rw-r-- 1 hope hope      69 Jun  7 09:19 inputs_outputs.txt
    +-rw-r--r-- 1 hope hope    5553 Jun  7 09:21 lenet.json
    +-rw-r--r-- 1 hope hope 1725178 Jun  7 09:21 lenet.data
    +
    +
    +
  • +
+
+
+

config.sh: preprocessing configuration generation

+

Generate the inputmeta file with config.sh

+
    +
  • config.sh takes the model data folder named after the model file, for example:

    +
    config.sh lenet
    +
    +
    +

    Generating the inputmeta file prints log messages and prints SUCCESS on success. Afterwards the lenet folder should contain the "lenet_inputmeta.yml" file:

    +
     $ ls -lrt lenet
    +total 3400
    +-rwxr-xr-x 1 hope hope 1727201 Nov  5  2018 lenet.pb
    +-rw-r--r-- 1 hope hope     553 Nov  5  2018 0.jpg
    +-rwxr--r-- 1 hope hope       6 Apr 21 17:04 dataset.txt
    +-rw-rw-r-- 1 hope hope      69 Jun  7 09:19 inputs_outputs.txt
    +-rw-r--r-- 1 hope hope    5553 Jun  7 09:21 lenet.json
    +-rw-r--r-- 1 hope hope 1725178 Jun  7 09:21 lenet.data
    +-rw-r--r-- 1 hope hope     948 Jun  7 09:35 lenet_inputmeta.yml
    +
    +
    +

    As you can see, the result is a *.yml file that configures the input-layer dataset for the Netrans intermediate model. Quantization, inference, export, and image-to-dat conversion in Netrans all rely on this file, so this step must not be skipped.

    +
  • +
+

The inputmeta.yml file is structured as follows:

+
%YAML 1.2
+---
+# !!!This file disallow TABs!!!
+# "category" allowed values: "image, undefined"
+# "database" allowed types: "H5FS, SQLITE, TEXT, LMDB, NPY, GENERATOR"
+# "tensor_name" only support in H5FS database
+# "preproc_type" allowed types:"IMAGE_RGB, IMAGE_RGB888_PLANAR, IMAGE_RGB888_PLANAR_SEP, IMAGE_I420,
+# IMAGE_NV12, IMAGE_YUV444, IMAGE_GRAY, IMAGE_BGRA, TENSOR"
+input_meta:
+  databases:
+  - path: dataset.txt
+    type: TEXT
+    ports:
+    - lid: data_0
+      category: image
+      dtype: float32
+      sparse: false
+      tensor_name:
+      layout: nhwc
+      shape:
+      - 50
+      - 224
+      - 224
+      - 3
+      preprocess:
+        reverse_channel: false
+        mean:
+        - 103.94
+        - 116.78
+        - 123.67
+        scale: 0.017
+        preproc_node_params:
+          preproc_type: IMAGE_RGB
+          add_preproc_node: false
+          preproc_perm:
+          - 0
+          - 1
+          - 2
+          - 3
+    - lid: label_0
+      redirect_to_output: true
+      category: undefined
+      tensor_name:
+      dtype: float32
+      shape:
+      - 1
+      - 1
+
+
+

Explanation of the parameters in the example file above:

+
| Parameter | Description |
+| :--- | --- |
+| input_meta | Declares the preprocessing parameter configuration. |
+| databases | Data configuration, including path, type and ports. |
+| path | Relative (to the working directory) or absolute path of the dataset file. Defaults to dataset.txt; changing it is not recommended. |
+| type | Dataset file format, fixed to TEXT. |
+| ports | Points to an input, or a redirected input, of the network. Only one input is currently supported; contact @ccyh if the network has several inputs. |
+| lid | lid of the input layer. |
+| category | Category of the input: image (image input) or undefined (any other input type). |
+| dtype | Data type of the input tensor sent to the Pnna network's input port. Supported types are float32 and quantized. |
+| sparse | Whether the network tensor is stored in sparse format: true (sparse) or false (packed). |
+| tensor_name | Leave this parameter empty. |
+| layout | Layout of the input tensor: nchw for Caffe, Darknet, ONNX and PyTorch models; nhwc for TensorFlow, TensorFlow Lite and Keras models. |
+| shape | Shape of the tensor. The first dimension, shape[0], is the per-batch input count, allowing several inputs per inference. If the batch dimension is 0, --batch-size must be given on the command line; if it is greater than 1, that batch size is used directly and --batch-size is ignored. |
+| fitting | Reserved field. |
+| preprocess | Preprocessing steps and their order. The four keys below are supported; their order defines the preprocessing order and may be adjusted. |
+| reverse_channel | Whether to reverse the channel order: true or false. Use true for TensorFlow and TensorFlow Lite models. |
+| mean | Mean value applied to each channel. |
+| scale | Scale value of the tensor. Mean and scale normalize the input as (inputTensor - mean) × scale. |
+| preproc_node_params | Preprocessing node parameters; enables the preprocessing task in OVxlib C project cases. |
+| add_preproc_node | Controls insertion of the preprocessing node in OVxlib C project cases. Boolean in [true, false]: when true, a preprocessing layer is added to the exported application via the parameters below, which only take effect in that case. |
+| preproc_type | Input type of the preprocessing node; string in [IMAGE_RGB, IMAGE_RGB888_PLANAR, IMAGE_YUV420, IMAGE_GRAY, IMAGE_BGRA, TENSOR]. |
+| preproc_perm | Permutation of the preprocessing node input. |
+| redirect_to_output | Special attribute that redirects a database tensor to a graph output. If a port is given, the network builder automatically generates an output layer for it so that post-processing can consume the database tensor directly. For classification networks, the label lid in the example above denotes the label lid of the input dataset; other names may be used. Note that redirect_to_output must be set to true so that post-processing can consume the database tensor directly, and the label lid must match the labels_tensor lid defined in the post-processing file. Boolean in [true, false]: whether the data of this input port is sent directly to the network output. |
+
+
+

Edit the generated inputmeta file as needed for your model.

+
+
+

quantize.sh: model quantization

+

If the trained model uses float32 data, it can be quantized so that it runs more efficiently on Pnna; quantization may introduce some loss of precision.

+
    +
  • Use the quantize.sh script in the netrans_cli directory to quantize the model.

  • +
+

Usage: ./quantize.sh takes the model data folder named after the model file, plus the quantization type, for example:

+
quantize.sh lenet uint8
+
+
+

Supported quantization types: uint8, int8, int16.

+
+
+

export.sh: model export

+

Use export.sh to export the model and generate the nbg file.

+

Usage: export.sh takes the model data folder named after the model file, plus the data type, for example:

+
export.sh lenet uint8
+
+
+

Supported export data types: float, uint8, int8, int16. Exporting with uint8, int8 or int16 requires quantizing the model first. The exported project is placed in the wksp directory under the model directory; the network_binary.nb file is in the "asymmetric_affine" folder:

+
ls -lrt lenet/wksp/asymmetric_affine/
+-rw-r--r-- 1 hope hope 694912 Jun  7 09:55 network_binary.nb
+
+
+

The generated network_binary.nb file can currently be deployed to the Pnna hardware platform; see the model deployment documentation for details.

+
+
+

Examples

+

See examples, which provides caffe, darknet, tensorflow and onnx model conversion examples.

+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/netrans_py.html b/docs/html/netrans_py.html new file mode 100644 index 0000000..beb7bbc --- /dev/null +++ b/docs/html/netrans_py.html @@ -0,0 +1,276 @@ + + + + + + + + netrans_py 使用 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Using netrans_py

+

netrans_py is the Python interface to the Netrans compiler. Converting a model with netrans_py takes the following steps:

+
    +
  1. Import the model

  2. Generate and edit the preprocessing configuration file *_inputmeta.yml

  3. Quantize the model

  4. Export the model
+
+

The Netrans class

+

Creating a Netrans instance

+

Description: instantiates the Netrans class. Example:

+
from netrans import Netrans
+yolo_netrans = Netrans("../examples/darknet/yolov4_tiny")
+
+
+

Parameters

+ + + + + + + + + + + + + + + + + + + + +
Name  Type  Description
model_path  str  first positional argument; path to the model file
netrans  str  if NETRANS_PATH is not set, the netrans path can be specified here

Returns: none.

+
+
+

Netrans.import: model import

+

Description: converts the model into a format supported by Pnna. Example:

+
getattr(yolo_netrans, "import")()  # "import" is a Python keyword, so the method is invoked via getattr
+
+
+

Parameters: none.

+

Returns: none. Generates the Pnna-supported model files in the project directory: a model file ending in .json and a weight file ending in .data.

+
+
+

Netrans.config: preprocessing configuration generation

+

Description: generates the preprocessing configuration file. Example:

+
yolo_netrans.config()
+
+
+

Parameters:

+
| Name | Type | Description |
+|:---| -- | -- |
+| inputmeta | bool or str: [False, True, "inputmeta_filepath"] | Specifies the inputmeta; defaults to False. If False, an inputmeta template is generated, whose common fields can be set via mean, scale and reverse_channel. If an inputmeta file already exists, pass its path here, or pass True to look up model_name_inputmeta.yml automatically. |
+| mean | float, int, list | mean used by normalize in preprocessing |
+| scale | float, int, list | scale used by normalize in preprocessing |
+| reverse_channel | bool | reverse_channel flag in preprocessing |
+
+
+

Returns: none.

+
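For instance, the normalize parameters can be set in the same call that generates the template (a sketch; the values mirror the sample inputmeta.yml above, they are not mandatory defaults):

yolo_netrans.config(mean=[103.94, 116.78, 123.67], scale=0.017, reverse_channel=False)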
+
+

Netrans.quantize: model quantization

+

Description: generates the quantization configuration file for the model. Example:

+
yolo_netrans.quantize("uint8")
+
+
+

Parameters:

+
| Name | Type | Description |
+|:---| -- | -- |
+| quantize_type | str | first positional argument; quantization type, one of "uint8", "int8", "int16" |
+
+
+

Returns: none.

+
+
+

Netrans.export: model export

+

Description: exports the quantized model and generates the nbg file. Example:

+
yolo_netrans.export()
+
+
+

Parameters: none.

+

Returns: none. Check the "wksp/*/" directory for the generated nbg file.

+
+
+

Netrans.model2nbg: generate the nbg file

+

Description: model import, quantization and nbg file generation in a single call. Example:

+
 # no preprocessing
+yolo_netrans.model2nbg(quantize_type='uint8')
 # the data needs to be normalized with mean 128 and scale 0.0039
+yolo_netrans.model2nbg(quantize_type='uint8', mean=128, scale=0.0039)
 # per-channel normalization with means 128, 127, 125, scale 0.0039, and reverse_channel True
+yolo_netrans.model2nbg(quantize_type='uint8', mean=[128, 127, 125], scale=0.0039, reverse_channel=True)
 # inputmeta has already been configured
+yolo_netrans.model2nbg(quantize_type='uint8', inputmeta=True)
+
+
+

Parameters

+
| Name | Type | Description |
+|:---| -- | -- |
+| quantize_type | str: ["uint8", "int8", "int16"] | quantization type; the model is quantized to this type |
+| inputmeta | bool or str: [False, True, "inputmeta_filepath"] | Specifies the inputmeta; defaults to False. If False, an inputmeta template is generated, whose common fields can be set via mean, scale and reverse_channel. If an inputmeta file already exists, pass its path here, or pass True to look up model_name_inputmeta.yml automatically. |
+| mean | float, int, list | mean used by normalize in preprocessing |
+| scale | float, int, list | scale used by normalize in preprocessing |
+| reverse_channel | bool | reverse_channel flag in preprocessing |
+
+
+

Returns: check the "wksp/*/" directory for the generated nbg file.

+
+
+

Example

+
from netrans import Netrans
+model_path = 'example/darknet/yolov4_tiny'
+netrans_path = "netrans/bin" # not needed if NETRANS_PATH has been exported
+
+# initialize netrans
+net = Netrans(model_path, netrans=netrans_path)
+# import the model ("import" is a Python keyword, so the method is invoked via getattr)
+getattr(net, "import")()
+# configure the preprocessing normalize parameters
+net.config(scale=1, mean=0)
+# quantize the model
+net.quantize("uint8")
+# export the model
+net.export()
+
+# quantize the model directly to int16 and export, reusing the inputmeta configured above
+net.model2nbg(quantize_type="int16", inputmeta=True)
+
+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/objects.inv b/docs/html/objects.inv new file mode 100644 index 0000000..0ee4aac Binary files /dev/null and b/docs/html/objects.inv differ diff --git a/docs/html/py-modindex.html b/docs/html/py-modindex.html new file mode 100644 index 0000000..8f34aaf --- /dev/null +++ b/docs/html/py-modindex.html @@ -0,0 +1,193 @@ + + + + + + + Python 模块索引 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ + +

Python Module Index

+ +
+ c | + e | + f | + i | + n | + q | + u +
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
 
+ c
+ config +
 
+ e
+ example +
+ export +
 
+ f
+ file_model +
 
+ i
+ import_model +
+ infer +
 
+ n
+ netrans +
 
+ q
+ quantize +
+ quantize_hb +
 
+ u
+ utils +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/quantize.html b/docs/html/quantize.html new file mode 100644 index 0000000..657e0f9 --- /dev/null +++ b/docs/html/quantize.html @@ -0,0 +1,133 @@ + + + + + + + + quantize module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

quantize module

+
+
+class quantize.Quantize(source_obj)[source]

Bases: AttributeCopier

Parses the Netrans parameters and quantizes the model via pnnacc

Parameters:

cla (class) -- the instantiated Netrans object whose parameters are parsed

+
+
+quantize_network(*args, **kargs)
+
+ +
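A usage sketch consistent with the other wrappers (hedged: it assumes the quantization type travels on the Netrans instance as a quantize_type attribute, as the create_cls helper in utils suggests):

from netrans import Netrans
from quantize import Quantize

net = Netrans("examples/onnx/yolov5s")
net.quantize_type = "uint8"        # assumption: attribute read by Quantize
Quantize(net).quantize_network()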
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/quantize_hb.html b/docs/html/quantize_hb.html new file mode 100644 index 0000000..9bf578e --- /dev/null +++ b/docs/html/quantize_hb.html @@ -0,0 +1,135 @@ + + + + + + + + quantize_hb module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

quantize_hb module

+
+
+class quantize_hb.Quantize(source_obj)[source]

Bases: AttributeCopier

+
+
+quantize_network(*args, **kargs)
+
+ +
+ +
+
+quantize_hb.main()[source]
+
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/quick_start_guide.html b/docs/html/quick_start_guide.html new file mode 100644 index 0000000..8187fac --- /dev/null +++ b/docs/html/quick_start_guide.html @@ -0,0 +1,254 @@ + + + + + + + + 快速入门 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

Quick Start

+

This document uses yolov5s in onnx format as an example to show how to quickly install Netrans and use it to quantize and compile the model and generate an nbg file.

+
+

System requirements

+
    +
  • Linux, Ubuntu 20.04 or Debian 12 recommended

  • Python 3.8

  • at least 8 GB of RAM
+
+
+

Installing Netrans

+

Create a Python 3.8 environment

+
wget "https://github.com/conda-forge/miniforge/releases/latest/download/Miniforge3-$(uname)-$(uname -m).sh"
+mkdir -p ~/app
+INSTALL_PATH="${HOME}/app/miniforge3"
+bash "Miniforge3-$(uname)-$(uname -m).sh" -b -p "${INSTALL_PATH}"
+echo "source ${INSTALL_PATH}/etc/profile.d/conda.sh" >> "${HOME}/.bashrc"
+echo "source ${INSTALL_PATH}/etc/profile.d/mamba.sh" >> "${HOME}/.bashrc"
+source "${HOME}/.bashrc"
+mamba create -n netrans python=3.8 -y
+mamba activate netrans
+
+
+

Download Netrans

+
cd ~/app
+git clone https://gitlink.org.cn/nudt_dsp/netrans.git
+
+
+

Configure Netrans

+
cd ~/app/netrans
+./setup.sh
+
+
+
+
+

Compiling the yolov5s model with Netrans

+

Enter the working directory

+
cd ~/app/netrans/examples/onnx
+
+
+

The directory layout is now:

+
onnx/
+├── README.md
+└── yolov5s
+    ├── 0.jpg
+    ├── dataset.txt
+    └── yolov5s.onnx
+
+
+
+

Compiling yolov5s with netrans_cli

+
+

Import the model

+
load.sh yolov5s
+
+
+

This command generates the .json and .data files containing the model information in the project directory.

+

The yolov5s directory now looks like this:

+
yolov5s/
+├── 0.jpg
+├── yolov5s.data
+├── yolov5s.json
+└── yolov5s.onnx
+
+
+
+
+

Generate the configuration file template

+

The configuration file defines the input preprocessing parameters. Netrans provides a script that generates a configuration template; edit the generated file according to the model's preprocessing parameters.

+
config.sh yolov5s
+
+
+

The yolov5s directory now looks like this:

+
yolov5s/
+├── 0.jpg
+├── dataset.txt
+├── yolov5s.data
+├── yolov5s_inputmeta.yml
+├── yolov5s.json
+└── yolov5s.onnx
+
+
+

According to the yolov5s preprocessing parameters, set scale in the yml to 0.003921568627: open the yolov5s_inputmeta.yml file and edit lines 30-33:

+
        scale:
+        - 0.003921568627
+        - 0.003921568627
+        - 0.003921568627
+
+
+
+
+

Quantize the model

+

Generate the quantization parameter file for uint8 quantization

+
quantize.sh yolov5s uint8
+
+
+

The yolov5s directory now looks like this:

+
yolov5s/
+├── 0.jpg
+├── dataset.txt
+├── yolov5s_asymmetric_affine.quantize
+├── yolov5s.data
+├── yolov5s_inputmeta.yml
+├── yolov5s.json
+└── yolov5s.onnx
+
+
+
+
+

Export the model

+

Export the uint8-quantized model project

+
export.sh yolov5s uint8
+
+
+

The yolov5s directory now looks like this:

+
yolov5s/
+├── 0.jpg
+├── dataset.txt
+├── wksp
+│   └── asymmetric_affine
+│       └── network_binary.nb
+├── yolov5s_asymmetric_affine.quantize
+├── yolov5s.data
+├── yolov5s_inputmeta.yml
+├── yolov5s.json
+└── yolov5s.onnx
+
+
+
+
+
+

Compiling the yolov5s model with netrans_py

+
example.py yolov5s -q uint8 -m 0 -s 0.003921568627
+
+
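The same conversion can also be scripted directly against the netrans_py API (a sketch equivalent to the example.py invocation above, using the documented model2nbg parameters):

from netrans import Netrans

net = Netrans("yolov5s")
net.model2nbg(quantize_type="uint8", mean=0, scale=0.003921568627)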
+
+
+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/search.html b/docs/html/search.html new file mode 100644 index 0000000..93d91a9 --- /dev/null +++ b/docs/html/search.html @@ -0,0 +1,122 @@ + + + + + + + 搜索 — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +

Search

+ + + + +

 When searching for multiple keywords, only results containing all of them are shown.

+ + +
+ + + +
+ + +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/searchindex.js b/docs/html/searchindex.js new file mode 100644 index 0000000..51b331b --- /dev/null +++ b/docs/html/searchindex.js @@ -0,0 +1 @@ +Search.setIndex({"alltitles":{"*.rst":[[6,"rst"]],"Contents:":[[8,null]],"Netrans \u7c7b":[[13,"netrans"]],"Netrans.config \u9884\u5904\u7406\u914d\u7f6e\u6587\u4ef6\u751f\u6210":[[13,"netrans-config"]],"Netrans.export \u6a21\u578b\u5bfc\u51fa":[[13,"netrans-export"]],"Netrans.import \u6a21\u578b\u5bfc\u5165":[[13,"netrans-import"]],"Netrans.model2nbg \u6a21\u578b\u751f\u6210nbg\u6587\u4ef6":[[13,"netrans-model2nbg-nbg"]],"Netrans.quantize \u6a21\u578b\u91cf\u5316":[[13,"netrans-quantize"]],"config module":[[1,null]],"config.sh \u9884\u5904\u7406\u914d\u7f6e\u6587\u4ef6\u751f\u6210":[[12,"config-sh"]],"dump module":[[2,null]],"example module":[[3,null]],"export module":[[4,null]],"export.sh \u6a21\u578b\u5bfc\u51fa":[[12,"export-sh"]],"file_model module":[[5,null]],"gen api html & pdf by sphinx":[[6,null]],"import_model module":[[7,null]],"infer module":[[9,null]],"load.sh \u6a21\u578b\u5bfc\u5165":[[12,"load-sh"]],"netrans documentation":[[8,null]],"netrans module":[[11,null]],"netrans_cli \u4f7f\u7528":[[12,null]],"netrans_cli \u811a\u672c":[[12,"id1"]],"netrans_py":[[10,null]],"netrans_py \u4f7f\u7528":[[13,null]],"quantize module":[[14,null]],"quantize.sh \u6a21\u578b\u91cf\u5316":[[12,"quantize-sh"]],"quantize_hb module":[[15,null]],"setup module":[[17,null]],"utils module":[[18,null]],"\u4f7f\u7528 Netrans \u7f16\u8bd1 yolov5s \u6a21\u578b":[[16,"netrans-yolov5s"]],"\u4f7f\u7528 autodoc + Sphinx \u5b9e\u73b0 python api \u6587\u6863(html)":[[6,"autodoc-sphinx-python-api-html"]],"\u4f7f\u7528 autodoc + Sphinx \u5b9e\u73b0 python api \u6587\u6863(pdf)":[[6,"autodoc-sphinx-python-api-pdf"]],"\u4f7f\u7528 netrans_cli \u7f16\u8bd1 yolov5s":[[16,"netrans-cli-yolov5s"]],"\u4f7f\u7528 netrans_py \u7f16\u8bd1 yolov5s \u6a21\u578b":[[16,"netrans-py-yolov5s"]],"\u4f7f\u7528\u793a\u4f8b":[[12,"id2"],[13,"id1"]],"\u5b89\u88c5Netrans":[[16,"netrans"]],"\u5bfc\u5165\u6a21\u578b":[[16,"id3"]],"\u5bfc\u51fa\u6a21\u578b":[[16,"id6"]],"\u5e38\u89c1\u62a5\u9519":[[6,"id1"]],"\u5feb\u901f\u5165\u95e8":[[16,null]],"\u751f\u6210\u914d\u7f6e\u6587\u4ef6\u6a21\u677f":[[16,"id4"]],"\u7cfb\u7edf\u73af\u5883":[[16,"id2"]],"\u91cf\u5316\u6a21\u578b":[[16,"id5"]],"\u9644\u5f55":[[0,null]]},"docnames":["appendix","config","dump","example","export","file_model","gen_api","import_model","index","infer","modules","netrans","netrans_cli","netrans_py","quantize","quantize_hb","quick_start_guide","setup","utils"],"envversion":{"sphinx":65,"sphinx.domains.c":3,"sphinx.domains.changeset":1,"sphinx.domains.citation":1,"sphinx.domains.cpp":9,"sphinx.domains.index":1,"sphinx.domains.javascript":3,"sphinx.domains.math":2,"sphinx.domains.python":4,"sphinx.domains.rst":2,"sphinx.domains.std":2,"sphinx.ext.viewcode":1},"filenames":["appendix.rst","config.rst","dump.rst","example.rst","export.rst","file_model.rst","gen_api.md","import_model.rst","index.rst","infer.rst","modules.rst","netrans.rst","netrans_cli.md","netrans_py.md","quantize.rst","quantize_hb.rst","quick_start_guide.md","setup.rst","utils.rst"],"indexentries":{"attributecopier\uff08utils \u4e2d\u7684\u7c7b\uff09":[[18,"utils.AttributeCopier",false]],"check_dir()\uff08\u5728 utils \u6a21\u5757\u4e2d\uff09":[[18,"utils.check_dir",false]],"check_env()\uff08\u5728 utils \u6a21\u5757\u4e2d\uff09":[[18,"utils.check_env",false]],"check_netrans()\uff08\u5728 utils 
\u6a21\u5757\u4e2d\uff09":[[18,"utils.check_netrans",false]],"check_path()\uff08\u5728 utils \u6a21\u5757\u4e2d\uff09":[[18,"utils.check_path",false]],"check_status()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.check_status",false]],"config":[[1,"module-config",false]],"config\uff08config \u4e2d\u7684\u7c7b\uff09":[[1,"config.Config",false]],"copy_attribute_name() \uff08utils.attributecopier \u65b9\u6cd5\uff09":[[18,"utils.AttributeCopier.copy_attribute_name",false]],"create_cls\uff08utils \u4e2d\u7684\u7c7b\uff09":[[18,"utils.create_cls",false]],"example":[[3,"module-example",false]],"export":[[4,"module-export",false]],"export_network() \uff08export.export \u65b9\u6cd5\uff09":[[4,"export.Export.export_network",false]],"export\uff08export \u4e2d\u7684\u7c7b\uff09":[[4,"export.Export",false]],"file_model":[[5,"module-file_model",false]],"import_caffe_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_caffe_network",false]],"import_darknet_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_darknet_network",false]],"import_model":[[7,"module-import_model",false]],"import_network() \uff08import_model.importmodel \u65b9\u6cd5\uff09":[[7,"import_model.ImportModel.import_network",false]],"import_onnx_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_onnx_network",false]],"import_pytorch_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_pytorch_network",false]],"import_tensorflow_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_tensorflow_network",false]],"import_tflite_network()\uff08\u5728 import_model \u6a21\u5757\u4e2d\uff09":[[7,"import_model.import_tflite_network",false]],"importmodel\uff08import_model \u4e2d\u7684\u7c7b\uff09":[[7,"import_model.ImportModel",false]],"infer":[[9,"module-infer",false]],"inference_network() \uff08infer.infer \u65b9\u6cd5\uff09":[[9,"infer.Infer.inference_network",false]],"infer\uff08infer \u4e2d\u7684\u7c7b\uff09":[[9,"infer.Infer",false]],"inputmeta_gen() \uff08config.config \u65b9\u6cd5\uff09":[[1,"config.Config.inputmeta_gen",false]],"main()\uff08\u5728 example \u6a21\u5757\u4e2d\uff09":[[3,"example.main",false]],"main()\uff08\u5728 export \u6a21\u5757\u4e2d\uff09":[[4,"export.main",false]],"main()\uff08\u5728 infer \u6a21\u5757\u4e2d\uff09":[[9,"infer.main",false]],"main()\uff08\u5728 quantize_hb \u6a21\u5757\u4e2d\uff09":[[15,"quantize_hb.main",false]],"module":[[1,"module-config",false],[3,"module-example",false],[4,"module-export",false],[5,"module-file_model",false],[7,"module-import_model",false],[9,"module-infer",false],[14,"module-quantize",false],[15,"module-quantize_hb",false],[18,"module-utils",false]],"quantize":[[14,"module-quantize",false]],"quantize_hb":[[15,"module-quantize_hb",false]],"quantize_network() \uff08quantize.quantize \u65b9\u6cd5\uff09":[[14,"quantize.Quantize.quantize_network",false]],"quantize_network() \uff08quantize_hb.quantize \u65b9\u6cd5\uff09":[[15,"quantize_hb.Quantize.quantize_network",false]],"quantize\uff08quantize \u4e2d\u7684\u7c7b\uff09":[[14,"quantize.Quantize",false]],"quantize\uff08quantize_hb \u4e2d\u7684\u7c7b\uff09":[[15,"quantize_hb.Quantize",false]],"remove_history_file()\uff08\u5728 utils 
\u6a21\u5757\u4e2d\uff09":[[18,"utils.remove_history_file",false]],"utils":[[18,"module-utils",false]]},"objects":{"":[[1,0,0,"-","config"],[3,0,0,"-","example"],[4,0,0,"-","export"],[5,0,0,"-","file_model"],[7,0,0,"-","import_model"],[9,0,0,"-","infer"],[11,0,0,"-","netrans"],[14,0,0,"-","quantize"],[15,0,0,"-","quantize_hb"],[18,0,0,"-","utils"]],"config":[[1,1,1,"","Config"]],"config.Config":[[1,2,1,"","inputmeta_gen"]],"example":[[3,3,1,"","main"]],"export":[[4,1,1,"","Export"],[4,3,1,"","main"]],"export.Export":[[4,2,1,"","export_network"]],"import_model":[[7,1,1,"","ImportModel"],[7,3,1,"","check_status"],[7,3,1,"","import_caffe_network"],[7,3,1,"","import_darknet_network"],[7,3,1,"","import_onnx_network"],[7,3,1,"","import_pytorch_network"],[7,3,1,"","import_tensorflow_network"],[7,3,1,"","import_tflite_network"]],"import_model.ImportModel":[[7,2,1,"","import_network"]],"infer":[[9,1,1,"","Infer"],[9,3,1,"","main"]],"infer.Infer":[[9,2,1,"","inference_network"]],"quantize":[[14,1,1,"","Quantize"]],"quantize.Quantize":[[14,2,1,"","quantize_network"]],"quantize_hb":[[15,1,1,"","Quantize"],[15,3,1,"","main"]],"quantize_hb.Quantize":[[15,2,1,"","quantize_network"]],"utils":[[18,1,1,"","AttributeCopier"],[18,3,1,"","check_dir"],[18,3,1,"","check_env"],[18,3,1,"","check_netrans"],[18,3,1,"","check_path"],[18,1,1,"","create_cls"],[18,3,1,"","remove_history_file"]],"utils.AttributeCopier":[[18,2,1,"","copy_attribute_name"]]},"objnames":{"0":["py","module","Python \u6a21\u5757"],"1":["py","class","Python \u7c7b"],"2":["py","method","Python \u65b9\u6cd5"],"3":["py","function","Python \u51fd\u6570"]},"objtypes":{"0":"py:module","1":"py:class","2":"py:method","3":"py:function"},"terms":{"0039":13,"003921568627":16,"017":12,"04":[12,16],"09":12,"10":6,"103":12,"116":12,"123":12,"125":13,"127":13,"128":13,"17":12,"1725178":12,"1727201":12,"19":12,"20":[12,13,16],"2018":12,"2025":6,"21":12,"224":12,"30":[13,16],"33":16,"3396":12,"3400":12,"35":12,"50":[12,13],"55":12,"553":12,"5553":12,"620":6,"67":12,"69":12,"694912":12,"721":6,"78":12,"80":12,"8gb":16,"94":12,"948":12,"_netran":18,"_setlocal":6,"_static":6,"_templat":6,"activ":16,"add_preproc_nod":12,"affin":12,"alabast":6,"align":[12,13],"all":6,"allow":12,"api":[0,8],"apidoc":6,"app":[6,16],"append":6,"apr":12,"arg":[1,4,7,9,14,15],"asymmetr":12,"asymmetric_affin":[12,16],"attributecopi":[1,4,7,9,10,14,15,18],"author":6,"autodoc":0,"bash":16,"bashrc":16,"batch":12,"bin":[6,13],"binari":[8,12],"bool":[13,18],"br":13,"build":6,"builder":6,"built":6,"by":[0,8],"caff":[7,12],"call":6,"categori":[6,12],"ccyh":[6,12],"cd":[6,16],"check_dir":[10,18],"check_env":[10,18],"check_netran":[10,18],"check_path":[10,18],"check_status":[7,10],"cla":14,"class":[1,4,7,9,14,15,18],"cli":[8,12,16],"clone":16,"cmd":[6,7],"cn":16,"com":16,"conda":16,"conf":6,"config":[0,8,10,16],"configur":6,"copy_attribute_nam":[10,18],"copyright":6,"creat":16,"create_cl":[10,18],"css":6,"ctex":6,"darknet":[7,12,13],"dat":12,"data":[12,13,16],"data_0":12,"databas":12,"dataset":[12,16],"debian12":16,"disallow":12,"doc":6,"docstr":6,"document":6,"download":16,"dtype":12,"dump":[0,10],"echo":16,"en":6,"env":6,"error":6,"etc":16,"exampl":[0,10,12,13,16],"exclude_pattern":6,"exit":6,"export":[0,6,8,10,16],"export_network":[4,10],"ext":6,"extens":6,"fals":[12,13,18],"fasl":13,"file":[6,12],"file_model":[0,10],"fit":12,"float":[12,13],"float32":12,"fontset":6,"for":6,"forg":16,"from":13,"full":6,"func":18,"gen":[0,8],"general":6,"generat":12,"git":16,"github":16,"gitlink":16,"googl":6,"gr
aph":8,"h5fs":12,"home":[6,16],"hope":12,"html":[0,8],"html_static_path":6,"html_theme":6,"https":[6,16],"imag":12,"image_bgra":12,"image_gray":12,"image_i420":12,"image_nv12":12,"image_rgb":12,"image_rgb888_planar":12,"image_rgb888_planar_sep":12,"image_yuv420":12,"image_yuv444":12,"import":[6,8],"import_caffe_network":[7,10],"import_darknet_network":[7,10],"import_model":[0,10],"import_network":[7,10],"import_onnx_network":[7,10],"import_pytorch_network":[7,10],"import_tensorflow_network":[7,10],"import_tflite_network":[7,10],"importmodel":[7,10],"in":[6,12],"index":6,"infer":[0,10],"inference_network":[9,10],"inform":6,"input_0":12,"input_meta":12,"inputmeta":[12,13],"inputmeta_filepath":13,"inputmeta_gen":[1,10],"inputs_output":12,"inputtensor":12,"install_path":16,"int":13,"int16":[12,13],"int8":[12,13],"introduct":13,"jpg":[12,16],"js":6,"json":[12,13,16],"jun":12,"karg":[1,4,7,9,14,15],"kera":12,"label_0":12,"labels_tensor":12,"languag":6,"last":6,"latest":16,"latex":6,"latexodf":6,"layout":12,"lc":6,"lc_all":6,"left":[12,13],"lenet":12,"lenet_inputmeta":12,"lib":6,"lid":12,"line":6,"linux":16,"list":[6,13],"lite":12,"lmdb":12,"load":[8,16],"local":6,"lrt":12,"ls":12,"main":[3,4,6,9,10,15],"make":6,"mamba":16,"markdown":6,"master":6,"md":[6,13,16],"mean":[12,13],"mena":13,"miniforg":16,"miniforge3":[6,16],"mkdir":16,"model2nbg":8,"model_nam":12,"model_name_inputmeta":13,"model_path":13,"modul":[0,6,10],"most":6,"my_modul":6,"name":[7,18],"napoleon":6,"nb":[8,12,16],"nbg":[8,12,16],"nchw":12,"nertan":[13,16,18],"net":13,"netan":13,"netran":[0,1,4,6,7,10,12,14,18],"netrans_c":[6,8,13],"netrans_path":[7,13,18],"netrans_pi":[0,6,8],"network":[8,12],"network_binari":[12,16],"network_nam":18,"ngb":4,"nhwc":12,"normal":13,"normliz":13,"notadirectoryerror":18,"nov":12,"npi":12,"ntran":[12,13],"nudt_dsp":16,"numpi":6,"object":18,"of":[6,7],"onli":12,"onnx":[7,12,16],"option":6,"org":[6,16],"os":6,"output":6,"ovxlib":12,"packag":6,"param":14,"path":[6,12],"pb":12,"pdf":[0,8],"pdflatex":6,"pnna":[8,12,13],"pnnacc":[1,4,7,14],"port":12,"preproc_node_param":12,"preproc_perm":12,"preproc_typ":12,"preprocess":12,"profil":16,"project":6,"py":[6,8,13,16],"python":[0,8,13,16],"python3":[6,16],"pytorch":[7,12],"quantiz":[0,8,10,15,16],"quantize_data_typ":12,"quantize_hb":[0,10],"quantize_network":[10,14,15],"quantize_typ":13,"quantized_typ":18,"quickstart":6,"ram":16,"readm":16,"recent":6,"redirect_to_output":12,"releas":[6,16],"remove_history_fil":[10,18],"restructuredtext":6,"result":7,"return":[6,7],"reverse_channel":[12,13],"rst":0,"run":7,"runtim":12,"rw":12,"rwxr":12,"scale":[12,13,16],"see":6,"set":6,"setlocal":6,"setup":16,"sh":[8,16],"shape":12,"site":6,"size":12,"sourc":[6,16],"source_obj":[1,4,7,9,14,15,18],"source_suffix":6,"spars":12,"sphinx":[0,8],"sphinx_rtd_them":6,"sqlite":12,"str":[7,13,18],"subprocess":7,"subprocrss":7,"sucess":12,"support":12,"sys":6,"tab":12,"templates_path":6,"tensor":12,"tensor_nam":12,"tensorflow":[7,12],"tex":6,"text":12,"tflite":7,"the":6,"this":12,"toctre":6,"total":12,"traceback":6,"true":[12,13],"txt":[12,16],"type":[12,14],"ubuntu":[6,16],"uint8":[12,13,16,18],"unam":16,"undefin":12,"unit8":16,"unsupport":6,"us":6,"usag":6,"usapackag":6,"usepackag":6,"utf":6,"utf8":6,"util":[0,10],"valu":[6,12],"verbos":18,"viewcod":6,"vim":6,"wget":16,"width":[12,13],"wksp":[12,13,16],"www":6,"x86_64":16,"xelatex":6,"xj":6,"xr":12,"yaml":12,"yml":[12,13,16],"yolo_netran":13,"yolov4_tini":13,"yolov5":8,"yolov5s_asymmetric_affin":16,"yolov5s_inputmeta":16,"zh":6},"titles":[
"\u9644\u5f55","config module","dump module","example module","export module","file_model module","gen api html & pdf by sphinx","import_model module","netrans documentation","infer module","netrans_py","netrans module","netrans_cli \u4f7f\u7528","netrans_py \u4f7f\u7528","quantize module","quantize_hb module","\u5feb\u901f\u5165\u95e8","setup module","utils module"],"titleterms":{"and":[],"api":6,"autodoc":6,"by":6,"config":[1,12,13],"content":8,"document":8,"dump":2,"exampl":3,"export":[4,12,13],"file_model":5,"gen":6,"html":6,"import":13,"import_model":7,"indic":[],"infer":9,"load":12,"model2nbg":13,"modul":[1,2,3,4,5,7,9,11,14,15,17,18],"nbg":13,"netran":[8,11,13,16],"netrans_c":[12,16],"netrans_pi":[10,13,16],"pdf":6,"python":6,"quantiz":[12,13,14],"quantize_hb":15,"rst":6,"setup":17,"sh":12,"sphinx":6,"tabl":[],"util":18,"yolov5":16}}) \ No newline at end of file diff --git a/docs/html/setup.html b/docs/html/setup.html new file mode 100644 index 0000000..e08d197 --- /dev/null +++ b/docs/html/setup.html @@ -0,0 +1,107 @@ + + + + + + + + setup module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

setup module

+
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/html/utils.html b/docs/html/utils.html new file mode 100644 index 0000000..5850cc0 --- /dev/null +++ b/docs/html/utils.html @@ -0,0 +1,180 @@ + + + + + + + + utils module — netrans 0.1 文档 + + + + + + + + + + + + + + + + + + + +
+
+
+ + +
+ +
+

utils module

+
+
+class utils.AttributeCopier(source_obj)[source]

Bases: object

Quickly parses and copies the Netrans information

+
+
+copy_attribute_name(source_obj)[source]
+
+ +
+ +
+
+utils.check_dir(network_name)[source]

Checks whether the project directory exists

Parameters:

network_name (str) -- path of the project directory

Raises:

NotADirectoryError -- no such project directory

+
+
+
+ +
+
+utils.check_env(name)[source]
+
+ +
+
+utils.check_netrans(netrans)[source]

Checks whether netrans has been configured correctly

Parameters:

netrans (str, bool) -- the netrans path; if not configured (defaults to False), the environment variables are searched

Raises:

NotADirectoryError -- raised if Netrans cannot be found

+
+
+
+ +
+
+utils.check_path(func)[source]

Decorator that ensures netrans runs inside the project directory

+
+ +
+
+class utils.create_cls(netrans_path, name, quantized_type='uint8', verbose=False)[source]

Bases: object

Simulates an instantiated Netrans for quick testing

+
+ +
+
+utils.remove_history_file(name)[source]
+
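A sketch of how these helpers combine during quick tests (signatures as documented above; the netrans path is a placeholder):

from utils import check_netrans, create_cls

check_netrans("netrans/bin")   # raises NotADirectoryError if netrans is not found
fake = create_cls("netrans/bin", "lenet", quantized_type="uint8")
# fake now mimics an instantiated Netrans and can be fed to Config, Quantize, etc.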
+ +
+ + +
+ +
+
+ +
+
+ + + + + + + \ No newline at end of file diff --git a/docs/netrans.pdf b/docs/netrans.pdf new file mode 100644 index 0000000..8c3ae89 Binary files /dev/null and b/docs/netrans.pdf differ diff --git a/netrans_py/utils.py b/netrans_py/utils.py index 14803c7..4fb669a 100644 --- a/netrans_py/utils.py +++ b/netrans_py/utils.py @@ -99,6 +99,7 @@ class create_cls(): #dataclass @netrans_params self.verbose=verbose self.quantize_type = quantized_type self.profile = False + # if __name__ == "__main__": # dir_name = "yolo"