Text Recognize using tesseract

Hello, I'm trying to recognize text from an image using Tesseract, but I'm unable to get a result.
I'm using the EAST technique to detect the text. I have one more question: how can I extend the padding of the detected box? cv2.putText does not work in this case.
Original code for text detection: https://github.com/opencv/opencv/blob/master/samples/dnn/text_detection.cpp



import cv2
import numpy as np
import argparse
import time
import math
import matplotlib.pyplot as plt
import skimage.io as io
import os
from imutils.object_detection import non_max_suppression
import pytesseract

print(np.__version__)

def decode_predictions(scores, geometry):
    # grab the number of rows and columns from the scores volume, then
    # initialize our set of bounding box rectangles and corresponding
    # confidence scores
    (numRows, numCols) = scores.shape[2:4]
    boxes = []
    confidences = []

    # loop over the number of rows
    for y in range(0, numRows):
        # extract the scores (probabilities), followed by the geometrical
        # data used to derive potential bounding box coordinates that
        # surround text
        scoresData = scores[0, 0, y]
        xData0 = geometry[0, 0, y]
        xData1 = geometry[0, 1, y]
        xData2 = geometry[0, 2, y]
        xData3 = geometry[0, 3, y]
        anglesData = geometry[0, 4, y]

        # loop over the number of columns
        for x in range(0, numCols):
            # if our score does not have sufficient probability, ignore it
            if scoresData[x] < args["min_confidence"]:
                continue

            # compute the offset factor as our resulting feature maps will
            # be 4x smaller than the input image
            (offsetX, offsetY) = (x * 4.0, y * 4.0)

            # extract the rotation angle for the prediction and then
            # compute the sin and cosine
            angle = anglesData[x]
            cos = np.cos(angle)
            sin = np.sin(angle)

            # use the geometry volume to derive the width and height of
            # the bounding box
            h = xData0[x] + xData2[x]
            w = xData1[x] + xData3[x]

            # compute the rotated rect for the text prediction bounding box
            offset = (offsetX + (cos * xData1[x]) + (sin * xData2[x]),
                      offsetY - (sin * xData1[x]) + (cos * xData2[x]))
            p1 = (-sin * h + offset[0], -cos * h + offset[1])
            p3 = (-cos * w + offset[0], sin * w + offset[1])
            center = (0.5 * (p1[0] + p3[0]), 0.5 * (p1[1] + p3[1]))

            # add the bounding box coordinates and probability score to
            # our respective lists
            boxes.append((center, (w, h), -angle * 180.0 / math.pi))
            confidences.append(float(scoresData[x]))

    return (boxes, confidences)

args = {
    "image": r"C:\Users\ckunwar\Test_Images\licence_plate1\52.jpg",
    "east": "frozen_east_text_detection.pb",
    "min_confidence": 0.25,
    "nms_thresh": 0.24,
    "width": 480,
    "height": 320,
    "padding": 0.0
}

# load the input image and grab the image dimensions
image = cv2.imread(args["image"])
orig = image.copy()
(H, W) = image.shape[:2]
#print(H, W)

# set the new width and height and then determine the ratio in change
# for both the width and height
(newW, newH) = (args["width"], args["height"])
rW = W / float(newW)
rH = H / float(newH)

# resize the image and grab the new image dimensions
image = cv2.resize(image, (newW, newH))
(H, W) = image.shape[:2]

# define the two output layer names for the EAST detector model that
# we are interested in -- the first is the output probabilities and the
# second can be used to derive the bounding box coordinates of text
layerNames = ["feature_fusion/Conv_7/Sigmoid", "feature_fusion/concat_3"]

# load the pre-trained EAST text detector
print("[INFO] loading EAST text detector...")
net = cv2.dnn.readNet(args["east"])

# construct a blob from the image and then perform a forward pass of
# the model to obtain the two output layer sets
blob = cv2.dnn.blobFromImage(image, 1.0, (W, H), (123.68, 116.78, 103.94), swapRB=True, crop=False)
start = time.time()
net.setInput(blob)
(scores, geometry) = net.forward(layerNames)
end = time.time()

# show timing information on text prediction
print("[INFO] text detection took {:.6f} seconds".format(end - start))

(boxes, confidences) = decode_predictions(scores, geometry)

# apply non-maxima suppression to suppress weak, overlapping bounding boxes
indices = cv2.dnn.NMSBoxesRotated(boxes, confidences, args["min_confidence"], args["nms_thresh"])

results = []

# loop over the bounding boxes kept by NMS
for i in indices:
    # get the 4 corners of the rotated rect
    vertices = cv2.boxPoints(boxes[i[0]])

    # scale the bounding box coordinates based on the respective ratios
    for j in [0, 1, 2, 3]:
        vertices[j][0] *= rW
        vertices[j][1] *= rH

    # walk the 4 edges of the box (only the last edge's endpoints are kept)
    for j in [0, 1, 2, 3]:
        p1 = (vertices[j][0], vertices[j][1])
        p2 = (vertices[(j + 1) % 4][0], vertices[(j + 1) % 4][1])

    # note: this runs Tesseract on the entire original image for every
    # box, not on the detected region
    config = "-l eng --oem 3 --psm 11"
    text = pytesseract.image_to_string(orig, config=config)

    results.append(((p1, p2), text))

results = sorted(results, key=lambda r: r[0][1])

output = orig.copy()

for ((p1, p2), text) in results:
    print("OCR TEXT")
    print("========")
    print("{}\n".format(text))

    # strip non-ASCII characters so the text can be drawn on the image
    text = "".join([c if ord(c) < 128 else "" for c in text]).strip()
    cv2.line(output, p1, p2, (0, 255, 0), 2)
    #cv2.rectangle(output, p1, p2, (0, 255, 0), 2)
    # this call is missing the required `org` (position) argument,
    # which is why cv2.putText fails here
    cv2.putText(output, text, cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 0, 255), 2)

# show the output image
#orig = cv2.cvtColor(orig, cv2.COLOR_BGR2RGB)
cv2.imshow("Text Detection", output)
cv2.waitKey(0)
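For the padding question, one option is to grow each rotated rect before cropping, and then run Tesseract on the cropped region rather than on the whole image; and cv2.putText fails above because it is missing its third argument, the org point (the bottom-left corner of the text). Below is a minimal sketch under those assumptions; expand_box, scale_box, and ocr_rotated_box are illustrative helper names, not part of the original OpenCV sample.

import cv2
import numpy as np
import pytesseract

def expand_box(box, pad=0.1):
    # box is an OpenCV rotated rect: ((cx, cy), (w, h), angle).
    # Grow the width and height by `pad` of their size on each side.
    (cx, cy), (w, h), angle = box
    return ((cx, cy), (w * (1.0 + 2.0 * pad), h * (1.0 + 2.0 * pad)), angle)

def scale_box(box, rW, rH):
    # Map a box from the resized detection frame back to the original
    # image (approximate for large angles when rW != rH).
    (cx, cy), (w, h), angle = box
    return ((cx * rW, cy * rH), (w * rW, h * rH), angle)

def ocr_rotated_box(image, box, pad=0.1):
    # Crop the axis-aligned bounding rectangle of the padded rotated box,
    # clamped to the image, and OCR just that crop.
    pts = cv2.boxPoints(expand_box(box, pad)).astype(np.int32)
    x, y, w, h = cv2.boundingRect(pts)
    x, y = max(x, 0), max(y, 0)
    roi = image[y:y + h, x:x + w]
    if roi.size == 0:
        return ""
    # --psm 7 treats the crop as a single line of text
    return pytesseract.image_to_string(roi, config="-l eng --oem 3 --psm 7")

# Illustrative usage inside the loop over the NMS indices:
#   box = scale_box(boxes[i[0]], rW, rH)
#   text = ocr_rotated_box(orig, box, pad=0.1)
# and a putText call with the required org point, e.g. near the box center:
#   cv2.putText(output, text, (int(box[0][0]), int(box[0][1])),
#               cv2.FONT_HERSHEY_TRIPLEX, 0.8, (0, 0, 255), 2)

For strongly rotated text you would deskew the crop first (e.g. with cv2.warpAffine using the box angle) before handing it to Tesseract; the axis-aligned crop above keeps the sketch short.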









Tags: python-3.x jupyter-notebook pytesseract






asked Nov 21 '18 at 10:50 by CVK








