Add support of IE model cache in time_tests (#5192)
Commit a70d13f9e2 (parent 127f931a5e)
@@ -0,0 +1,543 @@
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/resnet-50-pytorch/caffe2/FP16/resnet-50-pytorch.xml
    name: resnet-50-pytorch
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/resnet-50-pytorch/caffe2/FP16/resnet-50-pytorch.xml
    name: resnet-50-pytorch
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/resnet-50-pytorch/caffe2/FP16-INT8/resnet-50-pytorch.xml
    name: resnet-50-pytorch
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/resnet-50-pytorch/caffe2/FP16-INT8/resnet-50-pytorch.xml
    name: resnet-50-pytorch
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/mobilenet-v2/caffe2/FP16/mobilenet-v2.xml
    name: mobilenet-v2
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/mobilenet-v2/caffe2/FP16/mobilenet-v2.xml
    name: mobilenet-v2
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/mobilenet-v2/caffe2/FP16-INT8/mobilenet-v2.xml
    name: mobilenet-v2
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/mobilenet-v2/caffe2/FP16-INT8/mobilenet-v2.xml
    name: mobilenet-v2
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/faster_rcnn_resnet101_coco/tf/FP16/faster_rcnn_resnet101_coco.xml
    name: faster_rcnn_resnet101_coco
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/faster_rcnn_resnet101_coco/tf/FP16/faster_rcnn_resnet101_coco.xml
    name: faster_rcnn_resnet101_coco
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/faster_rcnn_resnet101_coco/tf/FP16-INT8/faster_rcnn_resnet101_coco.xml
    name: faster_rcnn_resnet101_coco
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/faster_rcnn_resnet101_coco/tf/FP16-INT8/faster_rcnn_resnet101_coco.xml
    name: faster_rcnn_resnet101_coco
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/faster-rcnn-resnet101-coco-sparse-60-0001/tf/FP16/faster-rcnn-resnet101-coco-sparse-60-0001.xml
    name: faster-rcnn-resnet101-coco-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/faster-rcnn-resnet101-coco-sparse-60-0001/tf/FP16/faster-rcnn-resnet101-coco-sparse-60-0001.xml
    name: faster-rcnn-resnet101-coco-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/faster-rcnn-resnet101-coco-sparse-60-0001/tf/FP16-INT8/faster-rcnn-resnet101-coco-sparse-60-0001.xml
    name: faster-rcnn-resnet101-coco-sparse-60-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/faster-rcnn-resnet101-coco-sparse-60-0001/tf/FP16-INT8/faster-rcnn-resnet101-coco-sparse-60-0001.xml
    name: faster-rcnn-resnet101-coco-sparse-60-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v1/tf/FP16/googlenet-v1.xml
    name: googlenet-v1
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v1/tf/FP16/googlenet-v1.xml
    name: googlenet-v1
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v1/tf/FP16-INT8/googlenet-v1.xml
    name: googlenet-v1
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v1/tf/FP16-INT8/googlenet-v1.xml
    name: googlenet-v1
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v3/tf/FP16/googlenet-v3.xml
    name: googlenet-v3
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v3/tf/FP16/googlenet-v3.xml
    name: googlenet-v3
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v3/tf/FP16-INT8/googlenet-v3.xml
    name: googlenet-v3
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/googlenet-v3/tf/FP16-INT8/googlenet-v3.xml
    name: googlenet-v3
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/ssd512/caffe/FP16/ssd512.xml
    name: ssd512
    precision: FP16
    framework: caffe
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/ssd512/caffe/FP16/ssd512.xml
    name: ssd512
    precision: FP16
    framework: caffe
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/ssd512/caffe/FP16-INT8/ssd512.xml
    name: ssd512
    precision: FP16-INT8
    framework: caffe
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/ssd512/caffe/FP16-INT8/ssd512.xml
    name: ssd512
    precision: FP16-INT8
    framework: caffe
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-0001/tf/FP16/yolo-v2-ava-0001.xml
    name: yolo-v2-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-0001/tf/FP16/yolo-v2-ava-0001.xml
    name: yolo-v2-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-0001/tf/FP16-INT8/yolo-v2-ava-0001.xml
    name: yolo-v2-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-0001/tf/FP16-INT8/yolo-v2-ava-0001.xml
    name: yolo-v2-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-35-0001/tf/FP16/yolo-v2-ava-sparse-35-0001.xml
    name: yolo-v2-ava-sparse-35-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-35-0001/tf/FP16/yolo-v2-ava-sparse-35-0001.xml
    name: yolo-v2-ava-sparse-35-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-35-0001/tf/FP16-INT8/yolo-v2-ava-sparse-35-0001.xml
    name: yolo-v2-ava-sparse-35-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-35-0001/tf/FP16-INT8/yolo-v2-ava-sparse-35-0001.xml
    name: yolo-v2-ava-sparse-35-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-70-0001/tf/FP16/yolo-v2-ava-sparse-70-0001.xml
    name: yolo-v2-ava-sparse-70-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-70-0001/tf/FP16/yolo-v2-ava-sparse-70-0001.xml
    name: yolo-v2-ava-sparse-70-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-70-0001/tf/FP16-INT8/yolo-v2-ava-sparse-70-0001.xml
    name: yolo-v2-ava-sparse-70-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-ava-sparse-70-0001/tf/FP16-INT8/yolo-v2-ava-sparse-70-0001.xml
    name: yolo-v2-ava-sparse-70-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-0001/tf/FP16/yolo-v2-tiny-ava-0001.xml
    name: yolo-v2-tiny-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-0001/tf/FP16/yolo-v2-tiny-ava-0001.xml
    name: yolo-v2-tiny-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-0001/tf/FP16-INT8/yolo-v2-tiny-ava-0001.xml
    name: yolo-v2-tiny-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-0001/tf/FP16-INT8/yolo-v2-tiny-ava-0001.xml
    name: yolo-v2-tiny-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-30-0001/tf/FP16/yolo-v2-tiny-ava-sparse-30-0001.xml
    name: yolo-v2-tiny-ava-sparse-30-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-30-0001/tf/FP16/yolo-v2-tiny-ava-sparse-30-0001.xml
    name: yolo-v2-tiny-ava-sparse-30-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-30-0001/tf/FP16-INT8/yolo-v2-tiny-ava-sparse-30-0001.xml
    name: yolo-v2-tiny-ava-sparse-30-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-30-0001/tf/FP16-INT8/yolo-v2-tiny-ava-sparse-30-0001.xml
    name: yolo-v2-tiny-ava-sparse-30-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-60-0001/tf/FP16/yolo-v2-tiny-ava-sparse-60-0001.xml
    name: yolo-v2-tiny-ava-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-60-0001/tf/FP16/yolo-v2-tiny-ava-sparse-60-0001.xml
    name: yolo-v2-tiny-ava-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-60-0001/tf/FP16-INT8/yolo-v2-tiny-ava-sparse-60-0001.xml
    name: yolo-v2-tiny-ava-sparse-60-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/yolo-v2-tiny-ava-sparse-60-0001/tf/FP16-INT8/yolo-v2-tiny-ava-sparse-60-0001.xml
    name: yolo-v2-tiny-ava-sparse-60-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16/squeezenet1.1.xml
    name: squeezenet1.1
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16/squeezenet1.1.xml
    name: squeezenet1.1
    precision: FP16
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16-INT8/squeezenet1.1.xml
    name: squeezenet1.1
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/squeezenet1.1/caffe2/FP16-INT8/squeezenet1.1.xml
    name: squeezenet1.1
    precision: FP16-INT8
    framework: caffe2
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-0001/tf/FP16/icnet-camvid-ava-0001.xml
    name: icnet-camvid-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-0001/tf/FP16/icnet-camvid-ava-0001.xml
    name: icnet-camvid-ava-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-0001/tf/FP16-INT8/icnet-camvid-ava-0001.xml
    name: icnet-camvid-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-0001/tf/FP16-INT8/icnet-camvid-ava-0001.xml
    name: icnet-camvid-ava-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-30-0001/tf/FP16/icnet-camvid-ava-sparse-30-0001.xml
    name: icnet-camvid-ava-sparse-30-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-30-0001/tf/FP16/icnet-camvid-ava-sparse-30-0001.xml
    name: icnet-camvid-ava-sparse-30-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-30-0001/tf/FP16-INT8/icnet-camvid-ava-sparse-30-0001.xml
    name: icnet-camvid-ava-sparse-30-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-30-0001/tf/FP16-INT8/icnet-camvid-ava-sparse-30-0001.xml
    name: icnet-camvid-ava-sparse-30-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-60-0001/tf/FP16/icnet-camvid-ava-sparse-60-0001.xml
    name: icnet-camvid-ava-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-60-0001/tf/FP16/icnet-camvid-ava-sparse-60-0001.xml
    name: icnet-camvid-ava-sparse-60-0001
    precision: FP16
    framework: tf
  use_model_cache: true
- device:
    name: CPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-60-0001/tf/FP16-INT8/icnet-camvid-ava-sparse-60-0001.xml
    name: icnet-camvid-ava-sparse-60-0001
    precision: FP16-INT8
    framework: tf
  use_model_cache: true
- device:
    name: GPU
  model:
    path: ${VPUX_MODELS_PKG}/icnet-camvid-ava-sparse-60-0001/tf/FP16-INT8/icnet-camvid-ava-sparse-60-0001.xml
    name: icnet-camvid-ava-sparse-60-0001
    precision: FP16-INT8
    framework: tf
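Each entry above is handed to the tests as an `instance` dictionary, which is why the fixtures below read `instance["device"]["name"]` and `instance.get("use_model_cache")`, and why the model paths carry the `${VPUX_MODELS_PKG}` placeholder. A minimal sketch of how such a config could be loaded and expanded; the YAML parsing and `os.path.expandvars` call are illustrative stand-ins, not the repo's actual `expand_env_vars` helper, and the file name is hypothetical:

import os
import yaml  # PyYAML


def load_test_config(config_path):
    """Load test cases and expand ${VAR} placeholders in model paths (illustrative sketch)."""
    with open(config_path) as f:
        instances = yaml.safe_load(f)  # list of dicts with 'device', 'model', 'use_model_cache'
    for instance in instances:
        model = instance["model"]
        model["path"] = os.path.expandvars(model["path"])  # resolves ${VPUX_MODELS_PKG}
    return instances


# instances = load_test_config("test_config_cache.yml")   # hypothetical file name
# instances[0]["device"]["name"] -> "CPU"; instances[0].get("use_model_cache") -> True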
@@ -133,13 +133,14 @@ def temp_dir(pytestconfig):


@pytest.fixture(scope="function")
-def cl_cache_dir(pytestconfig):
+def cl_cache_dir(pytestconfig, instance):
    """Generate directory to save OpenCL cache before test run and clean up after run.

    Folder `cl_cache` should be created in a directory where tests were run. In this case
    cache will be saved correctly. This behaviour is OS independent.
    More: https://github.com/intel/compute-runtime/blob/master/opencl/doc/FAQ.md#how-can-cl_cache-be-enabled
    """
+    if instance["device"]["name"] == "GPU":
        cl_cache_dir = pytestconfig.invocation_dir / "cl_cache"
        # if cl_cache generation to a local `cl_cache` folder doesn't work, specify
        # `cl_cache_dir` environment variable in an attempt to fix it (Linux specific)
@@ -147,8 +148,28 @@ def cl_cache_dir(pytestconfig):
        if cl_cache_dir.exists():
            shutil.rmtree(cl_cache_dir)
        cl_cache_dir.mkdir()
        logging.info("cl_cache will be created in {}".format(cl_cache_dir))
        yield cl_cache_dir
        shutil.rmtree(cl_cache_dir)
+    else:
+        yield None


+@pytest.fixture(scope="function")
+def model_cache_dir(pytestconfig, instance):
+    """
+    Generate directory to store IE model cache before test run and clean up after run.
+    """
+    if instance.get("use_model_cache"):
+        model_cache_dir = pytestconfig.invocation_dir / "models_cache"
+        if model_cache_dir.exists():
+            shutil.rmtree(model_cache_dir)
+        model_cache_dir.mkdir()
+        logging.info("model_cache will be created in {}".format(model_cache_dir))
+        yield model_cache_dir
+        shutil.rmtree(model_cache_dir)
+    else:
+        yield None


@pytest.fixture(scope="function")
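The fixture above only creates and removes the `models_cache` directory; enabling the cache itself is up to the timetest executable that receives the path. In the Inference Engine API, model caching is switched on through the `CACHE_DIR` config key. A rough Python-API sketch of that idea, not code from this PR:

from openvino.inference_engine import IECore  # IE Python API, 2021.x naming


def load_with_model_cache(model_xml, device, model_cache_dir=None):
    """Read and load a network, reusing compiled blobs from model_cache_dir when given (sketch)."""
    ie = IECore()
    if model_cache_dir:
        # The plugin serializes the compiled network into this folder on the first load
        # and deserializes it on subsequent loads, which is what the cached run measures.
        ie.set_config({"CACHE_DIR": str(model_cache_dir)}, device)
    net = ie.read_network(model=model_xml)
    return ie.load_network(network=net, device_name=device)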
@@ -25,7 +25,7 @@ from test_runner.utils import expand_env_vars
REFS_FACTOR = 1.2  # 120%


-def test_timetest(instance, executable, niter, cl_cache_dir, test_info, temp_dir, validate_test_case,
+def test_timetest(instance, executable, niter, cl_cache_dir, model_cache_dir, test_info, temp_dir, validate_test_case,
                  prepare_db_info):
    """Parameterized test.

@@ -33,6 +33,7 @@ def test_timetest(instance, executable, niter, cl_cache_dir, test_info, temp_dir
    :param executable: timetest executable to run
    :param niter: number of times to run executable
    :param cl_cache_dir: directory to store OpenCL cache
+    :param model_cache_dir: directory to store IE model cache
    :param test_info: custom `test_info` field of built-in `request` pytest fixture
    :param temp_dir: path to a temporary directory. Will be cleaned up after test run
    :param validate_test_case: custom pytest fixture. Should be declared as test argument to be enabled
@@ -55,13 +56,13 @@ def test_timetest(instance, executable, niter, cl_cache_dir, test_info, temp_dir
        "device": instance["device"]["name"],
        "niter": niter
    }
-    if exe_args["device"] == "GPU":
-        # Generate cl_cache via additional timetest run
-        _exe_args = exe_args.copy()
-        _exe_args["niter"] = 1
-        logging.info("Run timetest once to generate cl_cache to {}".format(cl_cache_dir))
-        run_timetest(_exe_args, log=logging)
+    logging.info("Run timetest once to generate any cache")
+    retcode, _, _ = run_timetest({**exe_args, "niter": 1}, log=logging)
+    assert retcode == 0, "Run of executable for warm up failed"
+    if cl_cache_dir:
+        assert os.listdir(cl_cache_dir), "cl_cache isn't generated"
+    if model_cache_dir:
+        assert os.listdir(model_cache_dir), "model_cache isn't generated"

    retcode, aggr_stats, raw_stats = run_timetest(exe_args, log=logging)
    assert retcode == 0, "Run of executable failed"
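The warm-up call builds its arguments with `{**exe_args, "niter": 1}`, a shallow copy of `exe_args` with only `niter` overridden, so the measured run that follows still uses the original `niter`. A tiny illustration; only the "device" and "niter" keys appear in the hunk above, the "model" key is assumed:

exe_args = {"model": "resnet-50-pytorch.xml", "device": "GPU", "niter": 10}
warmup_args = {**exe_args, "niter": 1}  # new dict, niter overridden for the cache-priming run
assert warmup_args["niter"] == 1
assert exe_args["niter"] == 10          # original arguments stay untouched for the timed run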
@@ -76,7 +76,7 @@ def filter_timetest_result(stats: dict):
        iqr, q1, q3 = calculate_iqr(time_results)
        cut_off = iqr * IQR_CUTOFF
        upd_time_results = [x for x in time_results if (q1 - cut_off < x < q3 + cut_off)]
-        filtered_stats.update({step_name: upd_time_results})
+        filtered_stats.update({step_name: upd_time_results if upd_time_results else time_results})
    return filtered_stats

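The added fallback matters when the IQR window collapses, for example with very few samples or identical timings: the strict bounds then reject every value, and without the fallback a whole step would lose its measurements. A self-contained sketch of the same filtering logic; `calculate_iqr` and `IQR_CUTOFF` here are stand-ins for the module's own helper and constant:

import numpy as np

IQR_CUTOFF = 1.5  # stand-in for the module-level constant


def calculate_iqr(values):
    """Assumed shape of the helper: return (iqr, q1, q3) for a list of timings."""
    q1, q3 = np.quantile(values, [0.25, 0.75])
    return q3 - q1, q1, q3


def filter_outliers(time_results):
    iqr, q1, q3 = calculate_iqr(time_results)
    cut_off = iqr * IQR_CUTOFF
    kept = [x for x in time_results if q1 - cut_off < x < q3 + cut_off]
    return kept if kept else time_results  # keep raw timings if the strict bounds reject everything


print(filter_outliers([10.0, 10.1, 10.2, 25.0]))  # the 25.0 outlier is dropped
print(filter_outliers([5.0, 5.0, 5.0]))           # zero IQR: bounds reject all, originals are kept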