Deploy TensorFlow model

Hi everyone,

I hope everyone is doing well. First of all, I'd like to thank Django for giving beginners the chance to learn and build cool stuff in web development.

I am currently working on a project where I need to deploy a TensorFlow model (for object detection) exported as a frozen_inference_graph.pb. I have adapted the code from this Python file (https://github.com/tensorflow/models/blob/master/research/object_detection/inference/detection_inference.py) into my view. However, when I send the POST request, after about 2 seconds the server can no longer be reached.
Do you have any clues about this problem, please?

Thanks!

Hi @hdrekz13,

Welcome to the forum.

It’s hard to answer your question without more specific details than “the server is not reached”. Do you receive a response or not? Do you see anything in your logs?

Did you read FAQ: Getting Help, and especially the Stack Overflow guide on asking good questions?

Thanks,

Adam

Hi Adam,

Thank you for your answer; sorry, my question was indeed not very clear.
Here is the inference.py file in my app folder:

```python
# Imports
import os
import shutil
import glob
import re
import numpy as np
import six.moves.urllib as urllib
import sys
import tarfile
import tensorflow as tf
import zipfile
import matplotlib
import tkinter

from collections import defaultdict
from io import StringIO
from matplotlib import pyplot as plt
from PIL import Image

from object_detection.utils import ops as utils_ops
from object_detection.utils import label_map_util
from object_detection.utils import visualization_utils as vis_util

# ------------------------------------------------------------------------------------

def pre_processing(num_classes):
    # Directories
    output_directory = "./model/1"

    # List of the strings that are used to add the correct label for each box.
    PATH_TO_LABELS = os.path.join(os.path.abspath(output_directory), "label_map.pbtxt")

    # Load the label map used to assign the correct labels.
    label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
    categories = label_map_util.convert_label_map_to_categories(
        label_map, max_num_classes=num_classes, use_display_name=True)
    category_index = label_map_util.create_category_index(categories)
    return category_index

# Turn the image into an array that is compatible with the model
def load_image_into_numpy_array(image):
    (im_width, im_height) = image.size
    return np.array(image.getdata()).reshape((im_height, im_width, 3)).astype(np.uint8)

# Run inference on a single image
def run_inference_for_single_image(image, graph):
    with graph.as_default():
        with tf.compat.v1.Session() as sess:
            # Get handles to input and output tensors
            ops = tf.compat.v1.get_default_graph().get_operations()
            all_tensor_names = {output.name for op in ops for output in op.outputs}
            tensor_dict = {}
            for key in ['num_detections', 'detection_boxes', 'detection_scores',
                        'detection_classes', 'detection_masks']:
                tensor_name = key + ':0'
                if tensor_name in all_tensor_names:
                    tensor_dict[key] = tf.compat.v1.get_default_graph().get_tensor_by_name(tensor_name)

            if 'detection_masks' in tensor_dict:
                # The following processing is only for a single image
                detection_boxes = tf.squeeze(tensor_dict['detection_boxes'], [0])
                detection_masks = tf.squeeze(tensor_dict['detection_masks'], [0])
                # Reframing is required to translate the masks from box coordinates
                # to image coordinates and fit the image size.
                real_num_detection = tf.cast(tensor_dict['num_detections'][0], tf.int32)
                detection_boxes = tf.slice(detection_boxes, [0, 0], [real_num_detection, -1])
                detection_masks = tf.slice(detection_masks, [0, 0, 0], [real_num_detection, -1, -1])
                detection_masks_reframed = utils_ops.reframe_box_masks_to_image_masks(
                    detection_masks, detection_boxes, image.shape[0], image.shape[1])
                detection_masks_reframed = tf.cast(tf.greater(detection_masks_reframed, 0.5), tf.uint8)
                # Follow the convention by adding back the batch dimension
                tensor_dict['detection_masks'] = tf.expand_dims(detection_masks_reframed, 0)

            image_tensor = tf.compat.v1.get_default_graph().get_tensor_by_name('image_tensor:0')

            # Run inference
            output_dict = sess.run(tensor_dict, feed_dict={image_tensor: np.expand_dims(image, 0)})

            # All outputs are float32 numpy arrays, so convert types as appropriate
            output_dict['num_detections'] = int(output_dict['num_detections'][0])
            output_dict['detection_classes'] = output_dict['detection_classes'][0].astype(np.uint8)
            output_dict['detection_boxes'] = output_dict['detection_boxes'][0]
            output_dict['detection_scores'] = output_dict['detection_scores'][0]
            if 'detection_masks' in output_dict:
                output_dict['detection_masks'] = output_dict['detection_masks'][0]

    # Return the outputs as a dictionary
    return output_dict
```

And here is the views.py in my app folder:

```python
from django.http import HttpResponse
from django.shortcuts import render
from django.core.files.storage import FileSystemStorage
from django.contrib.auth.decorators import login_required

from .models import Case
from .inference import *

@login_required
def home(request):
    """Render the analysis page."""
    return render(request, 'analyzis.html')

def predictImage(request):
    # Path for the uploaded image
    fileObj = request.FILES['filePath']
    fs = FileSystemStorage()
    filePathName = fs.save(fileObj.name, fileObj)
    filePathName = fs.url(filePathName)

    # Deep learning model
    # Launch graph
    import os
    import shutil
    import glob
    import re
    import numpy as np
    import six.moves.urllib as urllib
    import sys
    import tarfile
    import tensorflow as tf
    import zipfile
    import matplotlib
    import tkinter
    from collections import defaultdict
    from io import StringIO
    from matplotlib import pyplot as plt
    from PIL import Image
    from object_detection.utils import ops as utils_ops
    from object_detection.utils import label_map_util
    from object_detection.utils import visualization_utils as vis_util

    # Path and variables for the frozen detection graph.
    output_directory = "./model/1"
    PATH_TO_CKPT = os.path.join(os.path.abspath(output_directory), "frozen_inference_graph.pb")
    num_classes = 32

    # Load graph
    detection_graph = tf.compat.v1.Graph()
    with detection_graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.io.gfile.GFile(PATH_TO_CKPT, 'rb') as fid:
            serialized_graph = fid.read()
            od_graph_def.ParseFromString(serialized_graph)
            tf.import_graph_def(od_graph_def, name='')

    # Preprocessing
    category_index = pre_processing(num_classes)
    image = Image.open(filePathName)
    image_np = load_image_into_numpy_array(image)

    # Inference
    output_dict = run_inference_for_single_image(image_np, detection_graph)
    context = {'filePathName': filePathName}

    # Bounding boxes and save image
    # Visualization of the results of a detection.
    vis_util.visualize_boxes_and_labels_on_image_array(
        image_np,
        output_dict['detection_boxes'],
        output_dict['detection_classes'],
        output_dict['detection_scores'],
        category_index,
        instance_masks=output_dict.get('detection_masks'),
        use_normalized_coordinates=True,
        line_thickness=1)
    # Save the annotated image (the boxes are drawn on image_np, not on image)
    Image.fromarray(image_np).save("modif_pic.jpg")
    return render(request, 'analyzis.html', context)

def predictImage2(request):
    fileObj = request.FILES['filePath']
    fs = FileSystemStorage()
    filePathName = fs.save(fileObj.name, fileObj)
    filePathName = fs.url(filePathName)
    context = {'filePathName': filePathName}
    return render(request, 'analyzis.html', context)
```

The view predictImage2 works; the view predictImage does not. When I try to use it in the same way as the other one (by submitting the uploaded image), I get this error:

This site can’t be reached
127.0.0.1 refused to connect.

I think it may be due to the time it takes to load my graph model (about 50 MB). Do you know if it is possible to increase this time limit? Or do you know of anything else that could prevent the server from loading the model?

Thanks a lot!

Hi hdrekz,

If your view takes a long time before rendering, you will run into problems.
But first, use the debugger to make sure the error really is caused by the processing time.
You can try to reproduce it by replacing your code with time.sleep(some duration).
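For example, something like this (a minimal sketch; the view name is just a stand-in for yours):

```python
# A stand-in view that only sleeps, to check whether a long-running
# view alone is enough to reproduce the "connection refused" error.
import time

from django.shortcuts import render

def slow_view(request):
    time.sleep(60)  # simulate the model-loading delay
    return render(request, 'analyzis.html')
```

If this view fails the same way, the problem is the processing time; if it doesn't, something else in the model-loading code is crashing the dev server.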

Once you are sure it's due to the processing time, you can try something like this:
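For example, one option is to load the graph only once, at module import time, so that every request reuses the same graph object instead of rebuilding it. A rough, untested sketch based on the code above:

```python
# inference.py -- sketch: build the detection graph once when Django
# imports this module, instead of on every request.
import os

import tensorflow as tf

OUTPUT_DIRECTORY = "./model/1"
PATH_TO_CKPT = os.path.join(os.path.abspath(OUTPUT_DIRECTORY),
                            "frozen_inference_graph.pb")

def _load_graph(path):
    graph = tf.compat.v1.Graph()
    with graph.as_default():
        od_graph_def = tf.compat.v1.GraphDef()
        with tf.io.gfile.GFile(path, 'rb') as fid:
            od_graph_def.ParseFromString(fid.read())
            tf.import_graph_def(od_graph_def, name='')
    return graph

# Loaded once at import time; views can use DETECTION_GRAPH directly.
DETECTION_GRAPH = _load_graph(PATH_TO_CKPT)
```

Your view would then call run_inference_for_single_image(image_np, DETECTION_GRAPH) without loading anything itself.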

Good luck

Hi Francois,

Thanks for your answer. It seems the first problem appears when I use the object detection API functions from TensorFlow. The first such call is label_map = label_map_util.load_labelmap(PATH_TO_LABELS); it works fine when I don't go through the Django web app. So, following your tip, I will check whether the response time is indeed too long.
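For reference, this is roughly how I plan to time that first call from `python manage.py shell` (just a sketch, reusing the paths from my code above):

```python
# Run inside `python manage.py shell` (or a plain Python shell) to time
# the first suspect call; the paths match the earlier code.
import os
import time

from object_detection.utils import label_map_util

PATH_TO_LABELS = os.path.join(os.path.abspath("./model/1"), "label_map.pbtxt")

start = time.time()
label_map = label_map_util.load_labelmap(PATH_TO_LABELS)
print("load_labelmap took %.2f s" % (time.time() - start))
```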