Fixed a bug in input validation for torch.compile options (#21787)

* Fixed a bug in input validation for torch.compile options

* Added a default device ("CPU") when the device resolves to None

* Addressed PR comments

---------

Co-authored-by: Ilya Lavrenov <ilya.lavrenov@intel.com>
Surya Siddharth Pemmaraju, 2023-12-22 01:51:08 -08:00, committed by GitHub
commit a88679aeb8 (parent c59ddbab69)
3 changed files with 23 additions and 15 deletions
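
For context, the "options" validated throughout this commit are the dict passed to torch.compile. A minimal usage sketch follows; the option keys ("device", "model_caching", "config"/"CACHE_DIR") are taken from the helpers in this diff, while the "openvino" backend name and the cache path are illustrative assumptions:

    import torch

    # Illustrative only: backend name and cache path are assumptions; the
    # option keys mirror _get_device, _get_model_caching and _get_config below.
    model = torch.nn.Linear(4, 4)
    compiled = torch.compile(
        model,
        backend="openvino",
        options={"device": "CPU", "model_caching": True, "config": {"CACHE_DIR": "./ov_cache"}},
    )
    print(compiled(torch.randn(2, 4)))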


@@ -117,7 +117,7 @@ def fx_openvino(subgraph, example_inputs, options):
     executor_parameters = None
     inputs_reversed = False
     openvino_model_caching = _get_model_caching(options)
-    if openvino_model_caching is not None:
+    if openvino_model_caching is not None and openvino_model_caching:
         # Create a hash to be used for caching
         model_hash_str = sha256(subgraph.code.encode('utf-8')).hexdigest()
         executor_parameters = {"model_hash_str": model_hash_str}
@@ -127,7 +127,7 @@ def fx_openvino(subgraph, example_inputs, options):
             maybe_fs_cached_name = cached_model_name(model_hash_str + "_fs", _get_device(options), example_inputs, _get_cache_dir(options))
             if os.path.isfile(maybe_fs_cached_name + ".xml") and os.path.isfile(maybe_fs_cached_name + ".bin"):
                 # Model is fully supported and already cached. Run the cached OV model directly.
-                compiled_model = openvino_compile_cached_model(maybe_fs_cached_name, *example_inputs)
+                compiled_model = openvino_compile_cached_model(maybe_fs_cached_name, options, *example_inputs)
                 def _call(*args):
                     res = execute_cached(compiled_model, *args)
                     return res


@@ -13,7 +13,7 @@ def _get_device(options) -> Optional[Any]:
     core = Core()
     device = "CPU"
-    if "device" in options:
+    if options is not None and "device" in options:
         device = options["device"]
     else:
         device = os.getenv("OPENVINO_TORCH_BACKEND_DEVICE")
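
The resolution order after this hunk, condensed into a standalone sketch (the check against core.available_devices is omitted here):

    import os

    def resolve_device(options):
        # explicit option first, then the env var, then the new "CPU" fallback
        if options is not None and "device" in options:
            return options["device"]
        return os.getenv("OPENVINO_TORCH_BACKEND_DEVICE") or "CPU"

    print(resolve_device(None))               # "CPU" unless the env var is set
    print(resolve_device({"device": "GPU"}))  # "GPU"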
@@ -24,13 +24,14 @@ def _get_device(options) -> Optional[Any]:
             + device
             + " is not in the list of OpenVINO Available Devices"
         )
+    else:
+        device = "CPU"
     return device

 def _is_cache_dir_in_config(options) -> Optional[Any]:
-    if "config" in options:
+    if options is not None and "config" in options:
         cfg = options["config"]
-        if "CACHE_DIR" in cfg:
+        if cfg is not None and "CACHE_DIR" in cfg:
             return True
     return False
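
The crash these None guards prevent is easy to reproduce: a membership test on None raises, so calling torch.compile with this backend and no options dict would fail inside these helpers:

    options = None
    try:
        "config" in options
    except TypeError as err:
        print(err)  # argument of type 'NoneType' is not iterable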
@@ -50,11 +51,20 @@ def _get_cache_dir(options) -> Optional[Any]:

 def _get_model_caching(options) -> Optional[Any]:
     if options is not None and "model_caching" in options:
-        return options["model_caching"]
+        caching = options["model_caching"]
+        if bool(caching) and str(caching).lower() not in ["false", "0"]:
+            return True
+        else:
+            return False
     else:
-        return os.getenv("OPENVINO_TORCH_MODEL_CACHING")
+        caching = os.getenv("OPENVINO_TORCH_MODEL_CACHING")
+        if caching is not None and caching.lower() not in ["false", "0"]:
+            return True
+        else:
+            return False

 def _get_config(options) -> Optional[Any]:
     if options is not None and "config" in options:
         return options["config"]
     return {}
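
How the reworked _get_model_caching normalizes its inputs; the expression below is the one from the options branch above:

    for value in [True, False, 1, 0, "True", "False", "0", None]:
        enabled = bool(value) and str(value).lower() not in ["false", "0"]
        print(repr(value), "->", enabled)  # only truthy, non-"false"/"0" values enable caching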


@@ -43,7 +43,7 @@ def cached_model_name(model_hash_str, device, args, cache_root, reversed = False
     return file_name

-def openvino_compile_cached_model(cached_model_path, *example_inputs, options):
+def openvino_compile_cached_model(cached_model_path, options, *example_inputs):
     core = Core()

     om = core.read_model(cached_model_path + ".xml")
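
The pitfall behind this signature change: any parameter declared after *example_inputs is keyword-only, so the positional call added in fx_openvino above would raise against the old signature. A self-contained sketch with hypothetical function names:

    def old_style(path, *inputs, options):    # options: keyword-only, no default
        return options

    try:
        old_style("model_fs", 1, 2)
    except TypeError as err:
        print(err)  # missing 1 required keyword-only argument: 'options'

    def new_style(path, options, *inputs):    # options is positional, as in the fix
        return options

    print(new_style("model_fs", {"device": "CPU"}, 1, 2))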
@@ -118,13 +118,11 @@ def openvino_compile(gm: GraphModule, *args, model_hash_str: str = None, options
         om.inputs[idx].get_node().set_partial_shape(PartialShape(list(input_data.shape)))
     om.validate_nodes_and_infer_types()

-    config = {}
+    config = _get_config(options)

     if model_hash_str is not None:
-        if _is_cache_dir_in_config(options):
-            config = _get_config(options)
-        else:
+        if not _is_cache_dir_in_config(options):
             config["CACHE_DIR"] = cache_root

     compiled = core.compile_model(om, device, config)
     return compiled
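
Net effect of this last hunk, condensed: the user's config dict is now always the starting point (previously it was discarded unless caching was on and CACHE_DIR was already set), and CACHE_DIR is only injected when the user did not pick one. A standalone sketch with an illustrative OpenVINO property:

    def build_config(user_config, cache_root, model_hash_str):
        config = dict(user_config)  # was: config = {}
        if model_hash_str is not None and "CACHE_DIR" not in config:
            config["CACHE_DIR"] = cache_root
        return config

    print(build_config({"PERFORMANCE_HINT": "LATENCY"}, "./cache", "abc123"))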