I'm able to test the baseline accuracy of the pre-quantized model; however, when applying AutoQuant, I get the following error:
```
ValueError: in user code:

    File "/tmp/ipykernel_1850/3844288644.py", line 30, in eval_callback  *
        sess.run(tf.compat.v1.global_variables_initializer())

    ValueError: Argument `fetch` = name: "init"
    op: "NoOp"
    device: "/device:CPU:0"
    cannot be interpreted as a Tensor. (Operation name: "init"
    op: "NoOp"
    device: "/device:CPU:0"
    is not an element of this graph.)
```
Below is the relevant code snippet. My current setup is WSL2 with an NVIDIA CUDA-enabled GPU.
```python
from typing import Optional

import numpy as np
import tensorflow as tf


def preprocess_image(image_path, pipeline):
    """Loads and preprocesses a single image."""
    image = tf.io.read_file(image_path)
    image = tf.image.decode_image(image, dtype=tf.float32)
    image /= pipeline.MAX_PIXEL_VALUE
    return image


def prepare_unlabeled_dataset(pipeline):
    """Prepare an unlabeled dataset from the image directory."""
    # image_paths, _ = pipeline.load_data()
    # preprocessed_images = pipeline.preprocess_images(image_paths)
    # return tf.data.Dataset.from_tensor_slices(preprocessed_images).batch(pipeline.batch_size)
    image_paths, _ = pipeline.load_data()
    dataset = tf.data.Dataset.from_tensor_slices(image_paths)
    # Map the paths to the actual preprocessed images
    dataset = dataset.map(lambda path: preprocess_image(path, pipeline),
                          num_parallel_calls=tf.data.AUTOTUNE)
    dataset = dataset.batch(pipeline.batch_size)
    return dataset


@tf.function
def eval_callback(sess: tf.compat.v1.Session, num_samples: Optional[int] = None) -> float:
    """Evaluate the segmentation model using a TensorFlow session."""
    if num_samples is None:
        num_samples = len(pipeline.load_data()[0])
    sampled_dataset = iter(prepare_unlabeled_dataset(pipeline).take(num_samples))

    sess.run(tf.compat.v1.global_variables_initializer())
    input_tensor = sess.graph.get_tensor_by_name(pipeline.model.input.name)
    output_tensor = sess.graph.get_tensor_by_name(pipeline.model.output.name)

    preds = []
    truths = []
    for images in sampled_dataset:
        # Run inference
        predictions = sess.run(output_tensor, feed_dict={input_tensor: images})
        preds.append(predictions)

    # Calculate evaluation score
    preds = np.array(preds).flatten()
    truths = np.ones_like(preds)  # Placeholder since dataset is unlabeled
    accuracy = np.mean((preds > pipeline.TH_FIRE) == truths)
    return accuracy
```
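For context, here is a minimal sketch of how the callback and unlabeled dataset are wired into AutoQuant. This is a simplified illustration based on the `aimet_tensorflow` examples, not my exact script; the `allowed_accuracy_drop` value and the `'input'`/`'output'` op names are placeholders.

```python
# Sketch only: argument names follow the aimet_tensorflow AutoQuant examples
# and may differ from my actual invocation.
from aimet_tensorflow.auto_quant import AutoQuant

unlabeled_dataset = prepare_unlabeled_dataset(pipeline)

auto_quant = AutoQuant(allowed_accuracy_drop=0.01,          # placeholder tolerance
                       unlabeled_dataset=unlabeled_dataset,
                       eval_callback=eval_callback)

# fp32_sess is the session holding the pre-quantized model;
# the op names below are placeholders for the real graph endpoints.
sess, accuracy, encoding_path = auto_quant.apply(fp32_sess,
                                                 starting_op_names=['input'],
                                                 output_op_names=['output'])
```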