Panoramic image stitching using OpenCV and Python

I am trying to stitch two images by finding keypoints and stitching the images together accordingly, using OpenCV and Python (panoramic image stitching). Luckily, I found code that does the trick. It works well with the images supplied alongside the code, but it does not work for my images: I get only the second image as the final result. The code is given below.



# import the necessary packages
import numpy as np
import imutils
import cv2

class Stitcher:
    def __init__(self):
        # determine if we are using OpenCV v3.X
        self.isv3 = imutils.is_cv3()

    def stitch(self, images, ratio=0.75, reprojThresh=4.0,
               showMatches=False):
        # unpack the images, then detect keypoints and extract
        # local invariant descriptors from them
        (imageB, imageA) = images
        (kpsA, featuresA) = self.detectAndDescribe(imageA)
        (kpsB, featuresB) = self.detectAndDescribe(imageB)

        # match features between the two images
        M = self.matchKeypoints(kpsA, kpsB,
                                featuresA, featuresB, ratio, reprojThresh)

        # if the match is None, then there aren't enough matched
        # keypoints to create a panorama
        if M is None:
            return None

        # otherwise, apply a perspective warp to stitch the images
        # together
        (matches, H, status) = M
        result = cv2.warpPerspective(imageA, H,
                                     (imageA.shape[1] + imageB.shape[1], imageA.shape[0]))
        cv2.imshow("a", result)
        result[0:imageB.shape[0], 0:imageB.shape[1]] = imageB
        cv2.imshow("b", result)

        # check to see if the keypoint matches should be visualized
        if showMatches:
            vis = self.drawMatches(imageA, imageB, kpsA, kpsB, matches,
                                   status)

            # return a tuple of the stitched image and the
            # visualization
            return (result, vis)

        # return the stitched image
        return result

    def detectAndDescribe(self, image):
        # convert the image to grayscale
        gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)

        # check to see if we are using OpenCV 3.X
        if self.isv3:
            # detect and extract features from the image
            descriptor = cv2.xfeatures2d.SIFT_create()
            (kps, features) = descriptor.detectAndCompute(image, None)

        # otherwise, we are using OpenCV 2.4.X
        else:
            # detect keypoints in the image
            detector = cv2.FeatureDetector_create("SIFT")
            kps = detector.detect(gray)

            # extract features from the image
            extractor = cv2.DescriptorExtractor_create("SIFT")
            (kps, features) = extractor.compute(gray, kps)

        # convert the keypoints from KeyPoint objects to NumPy arrays
        kps = np.float32([kp.pt for kp in kps])

        # return a tuple of keypoints and features
        return (kps, features)

    def matchKeypoints(self, kpsA, kpsB, featuresA, featuresB,
                       ratio, reprojThresh):
        # compute the raw matches and initialize the list of actual
        # matches
        matcher = cv2.DescriptorMatcher_create("BruteForce")
        rawMatches = matcher.knnMatch(featuresA, featuresB, 2)
        matches = []

        # loop over the raw matches
        for m in rawMatches:
            # ensure the distance is within a certain ratio of each
            # other (i.e. Lowe's ratio test)
            if len(m) == 2 and m[0].distance < m[1].distance * ratio:
                matches.append((m[0].trainIdx, m[0].queryIdx))

        # computing a homography requires at least 4 matches
        if len(matches) > 4:
            # construct the two sets of points
            ptsA = np.float32([kpsA[i] for (_, i) in matches])
            ptsB = np.float32([kpsB[i] for (i, _) in matches])

            # compute the homography between the two sets of points
            (H, status) = cv2.findHomography(ptsA, ptsB, cv2.RANSAC,
                                             reprojThresh)

            # return the matches along with the homography matrix
            # and status of each matched point
            return (matches, H, status)

        # otherwise, no homography could be computed
        return None

    def drawMatches(self, imageA, imageB, kpsA, kpsB, matches, status):
        # initialize the output visualization image
        (hA, wA) = imageA.shape[:2]
        (hB, wB) = imageB.shape[:2]
        vis = np.zeros((max(hA, hB), wA + wB, 3), dtype="uint8")
        vis[0:hA, 0:wA] = imageA
        vis[0:hB, wA:] = imageB

        # loop over the matches
        for ((trainIdx, queryIdx), s) in zip(matches, status):
            # only process the match if the keypoint was successfully
            # matched
            if s == 1:
                # draw the match
                ptA = (int(kpsA[queryIdx][0]), int(kpsA[queryIdx][1]))
                ptB = (int(kpsB[trainIdx][0]) + wA, int(kpsB[trainIdx][1]))
                cv2.line(vis, ptA, ptB, (0, 255, 0), 1)

        # return the visualization
        return vis


I call this script from my main code like this:



from deeps.panorama import Stitcher
from PIL import Image
import pytesseract
import argparse
import imutils
import cv2

# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-f", "--first", required=True,
help="path to the first image")
ap.add_argument("-s", "--second", required=True,
help="path to the second image")
args = vars(ap.parse_args())

# load the two images and resize them to have a width of 400 pixels
# (for faster processing)
imageA = cv2.imread(args["first"])
imageB = cv2.imread(args["second"])

imageA = imutils.resize(imageA, width=400)
imageB = imutils.resize(imageB, width=400)

# stitch the images together to create a panorama
stitcher = Stitcher()
(result1, vis1) = stitcher.stitch([imageA, imageB], showMatches=True)

# show the images
cv2.imshow("Image A", imageA)
cv2.imshow("Image B", imageB)
cv2.imshow("Keypoint Matches1", vis1)
cv2.imshow("Result1", result1)
img = Image.fromarray(result1)
text = pytesseract.image_to_string(img)
print(text)
cv2.waitKey(0)
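
One caveat about the OCR step in the script above: cv2.imread and warpPerspective produce arrays in BGR channel order, while PIL (and therefore pytesseract) interpret a NumPy array as RGB. Tesseract often copes anyway on dark-on-light text, but an explicit conversion is safer; a minimal one-line adjustment (not part of the original script):

# convert BGR -> RGB before handing the array to PIL/pytesseract
img = Image.fromarray(cv2.cvtColor(result1, cv2.COLOR_BGR2RGB))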


My ultimate aim is to get the final text by combining the images (scrolling text). The images are attached. I don't know what is wrong with the script; if you have other solutions, please let me know.



[image 1: first part of the scrolling text]

[image 2: second part of the scrolling text]

Tags: python, image, opencv, panoramas, image-stitching

asked Jul 25 '17 at 6:55 by Deepan Raj

• You may try OpenCV's stitcher module directly. It does all these things under the hood. – ZdaR, Jul 25 '17 at 7:05
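
For reference, a minimal sketch of what this suggestion looks like. It assumes the OpenCV 3.x Python bindings, where the factory function is cv2.createStitcher (OpenCV 4.x renamed it to cv2.Stitcher_create); the file names are placeholders:

import cv2

# load the two frames (placeholder file names)
imageA = cv2.imread("first.png")
imageB = cv2.imread("second.png")

# OpenCV 3.x exposes cv2.createStitcher(); OpenCV 4.x renamed it
# to cv2.Stitcher_create()
try:
    stitcher = cv2.createStitcher(False)  # False = do not try the GPU
except AttributeError:
    stitcher = cv2.Stitcher_create()

# stitch() returns a status code and the panorama; 0 means Stitcher::OK
(status, pano) = stitcher.stitch([imageA, imageB])
if status == 0:
    cv2.imshow("Panorama", pano)
    cv2.waitKey(0)
else:
    print("Stitching failed with status code", status)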

• I tried the stitcher module, but I get an error like this: "OpenCV Error: Assertion failed (The data should normally be NULL!) in NumpyAllocator::allocate". I am using Python 3.6 and OpenCV 3.2; there seems to be a bug in the stitcher module. Python 3.6 suits my application, so is there any way other than the stitcher module? – Deepan Raj, Jul 25 '17 at 9:07

• That seems to be an issue with your OpenCV version; there is a long thread on GitHub related to it. I think it is fixed in the latest releases, so you may build OpenCV from the latest code. – ZdaR, Jul 25 '17 at 9:16

• I installed the new version. The same problem still occurs when using the stitcher module. – Deepan Raj, Jul 26 '17 at 7:22

• If you are able to find the word that has been cut off ("cont" in your example), then you can use it as a template and locate it in the second image (thanks to template matching). Once you have that info, it's trivial to stitch both images. Having more than two images to stitch doesn't make the problem harder, in my opinion. – Elouarn Laine, Jul 28 '17 at 13:16
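
A rough sketch of this template-matching idea, assuming the two screenshots have the same height, the text scrolls horizontally, and the overlap is at least as wide as the template strip; the file names and the 50-pixel strip width are made up for illustration:

import cv2
import numpy as np

imageA = cv2.imread("first.png")   # ends with the cut word
imageB = cv2.imread("second.png")  # starts with the cut word

# use the right-most strip of the first image as the template
strip = 50
template = imageA[:, -strip:]

# locate that strip inside the second image
res = cv2.matchTemplate(imageB, template, cv2.TM_CCOEFF_NORMED)
(_, maxVal, _, maxLoc) = cv2.minMaxLoc(res)

# everything in imageB up to (maxLoc[0] + strip) is already present
# in imageA, so append only what comes after the overlap
overlapEnd = maxLoc[0] + strip
stitched = np.hstack([imageA, imageB[:, overlapEnd:]])

cv2.imshow("Stitched", stitched)
cv2.waitKey(0)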