A very basic introduction to using segmentation models with OpenVINO™.
In this tutorial, a pre-trained road-segmentation-adas-0001 model from the Open Model Zoo is used. ADAS stands for Advanced Driver Assistance Systems. The model recognizes four classes: background, road, curb, and mark.
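For reference, the class names can be kept next to the code as a small list. This is only an illustrative sketch; the index order (background, road, curb, mark) follows the Open Model Zoo model description and is an assumption here, not something defined by the cells below.

# Class indices produced by road-segmentation-adas-0001.
# Index order is assumed from the Open Model Zoo documentation.
CLASS_NAMES = ["background", "road", "curb", "mark"]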
from pathlib import Path

# Helper functions from notebook_utils.py (part of the openvino_notebooks repository).
from notebook_utils import download_file, segmentation_map_to_image

base_model_dir = Path("./model").expanduser()

model_name = "road-segmentation-adas-0001"
model_xml_name = f"{model_name}.xml"
model_bin_name = f"{model_name}.bin"

model_xml_path = base_model_dir / model_xml_name

if not model_xml_path.exists():
    model_xml_url = (
        "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2023.0/models_bin/1/road-segmentation-adas-0001/FP32/road-segmentation-adas-0001.xml"
    )
    model_bin_url = (
        "https://storage.openvinotoolkit.org/repositories/open_model_zoo/2023.0/models_bin/1/road-segmentation-adas-0001/FP32/road-segmentation-adas-0001.bin"
    )

    download_file(model_xml_url, model_xml_name, base_model_dir)
    download_file(model_bin_url, model_bin_name, base_model_dir)
else:
    print(f"{model_name} already downloaded to {base_model_dir}")
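The later cells use compiled_model, input_layer_ir, and output_layer_ir, which are not created anywhere in this section. Below is a minimal sketch of the missing loading and compilation step, assuming the standard openvino Python API and the "AUTO" device (both assumptions here; substitute "CPU", "GPU", etc. as needed).

import openvino as ov

core = ov.Core()

# Read the downloaded IR files and compile the model.
# "AUTO" lets OpenVINO choose a device; this is an assumption, not part of the original cells.
model = core.read_model(model=model_xml_path)
compiled_model = core.compile_model(model=model, device_name="AUTO")

# Input and output handles used by the inference and preprocessing cells below.
input_layer_ir = compiled_model.input(0)
output_layer_ir = compiled_model.output(0)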
import cv2
import matplotlib.pyplot as plt
import numpy as np

# Download the image from the openvino_notebooks storage.
image_filename = download_file(
    "https://storage.openvinotoolkit.org/repositories/openvino_notebooks/data/data/image/empty_road_mapillary.jpg",
    directory="data",
)

# The segmentation network expects images in BGR format.
image = cv2.imread(str(image_filename))
rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image_h, image_w, _ = image.shape

# N, C, H, W = batch size, number of channels, height, width.
N, C, H, W = input_layer_ir.shape

# OpenCV resize expects the destination size as (width, height).
resized_image = cv2.resize(image, (W, H))

# Reshape to the network input shape.
input_image = np.expand_dims(resized_image.transpose(2, 0, 1), 0)
plt.imshow(rgb_image)
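A quick shape check can confirm that the preprocessing produced a tensor in the layout the network expects. This is purely illustrative and can be skipped.

# The prepared tensor should match the network's N, C, H, W input layout.
print("Network input shape:", input_layer_ir.shape)
print("Prepared image shape:", input_image.shape)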
# Run the inference.
result = compiled_model([input_image])[output_layer_ir]

# Prepare data for visualization.
segmentation_mask = np.argmax(result, axis=1)
plt.imshow(segmentation_mask.transpose(1, 2, 0))
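To see which of the four classes actually appear in this image, the predicted indices can be counted with NumPy. This small check is an addition for illustration, not part of the original notebook cells.

# Count how many pixels were assigned to each predicted class index.
classes, counts = np.unique(segmentation_mask, return_counts=True)
for class_id, count in zip(classes, counts):
    print(f"class {class_id}: {count} pixels")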
# Define colormap, each color represents a class.
colormap = np.array([[68, 1, 84], [48, 103, 141], [53, 183, 120], [199, 216, 52]])

# Define the transparency of the segmentation mask on the photo.
alpha = 0.3

# Use function from notebook_utils.py to transform mask to an RGB image.
mask = segmentation_map_to_image(segmentation_mask, colormap)
resized_mask = cv2.resize(mask, (image_w, image_h))

# Create an image with mask.
image_with_mask = cv2.addWeighted(resized_mask, alpha, rgb_image, 1 - alpha, 0)
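If the blended result should also be written to disk, one option is OpenCV's imwrite. Note that cv2.imwrite expects BGR channel order, so the RGB overlay is converted first; the output path below is arbitrary and only used for this sketch.

# cv2.imwrite expects BGR, while image_with_mask is RGB.
output_path = Path("data") / "empty_road_mapillary_masked.jpg"  # hypothetical output file
cv2.imwrite(str(output_path), cv2.cvtColor(image_with_mask, cv2.COLOR_RGB2BGR))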
# Define titles with images.
data = {"Base Photo": rgb_image, "Segmentation": mask, "Masked Photo": image_with_mask}

# Create a subplot to visualize images.
fig, axs = plt.subplots(1, len(data.items()), figsize=(15, 10))

# Fill the subplot.
for ax, (name, image) in zip(axs, data.items()):
    ax.axis("off")
    ax.set_title(name)
    ax.imshow(image)

# Display the images.
plt.show()
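When the code is run as a plain script rather than interactively, the comparison figure can also be saved with matplotlib's savefig; the filename here is just an example.

# Persist the side-by-side comparison; bbox_inches="tight" trims surrounding whitespace.
fig.savefig("segmentation_results.png", bbox_inches="tight")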