Reading from Webcam to display as a background

I’m using Windows 7 with Panda3D 1.9.2, OpenCV 3.1 and Python 2.7.5.

What I am trying to do is take a live video feed from a digital camera and overlay 3D content created with Panda3D (similar to augmented reality).

I have started with the following code, which gives me a camera feed and displays it on the screen.

import numpy as np
import cv2

cap = cv2.VideoCapture(0)

while(True):
    # Capture frame-by-frame
    ret, frame = cap.read()

    # Our operations on the frame come here
    # gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Display the resulting frame
    #cv2.imshow('frame',gray)
    cv2.imshow('frame',frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()

However, I have found no way of turning the OpenCV frame into a Panda3D texture that I can apply to a CardMaker card and place at the back of my scene. I have tried converting the cv2 frame to a buffer, using code I found in an older post on this site:

from pandac.PandaModules import WebcamVideo
from panda3d.core import MovieTexture, Texture, CardMaker, NodePath
from direct.showbase.ShowBase import ShowBase

import cv2
import numpy as np

class MediaPlayer(ShowBase):

    
    def __init__(self,cam_no):
        print "initialised"
        ShowBase.__init__(self)
        self.cap = cv2.VideoCapture(cam_no)

        ret, frame = self.cap.read()
        h,w,d = frame.shape
        self.tex = Texture()
        self.tex.setup2dTexture(h, w, Texture.T_unsigned_byte, Texture.F_luminance)
        buf = frame[:,:,0].T.tostring() # take a single channel (blue, since OpenCV frames are BGR) as grayscale, transpose, convert to a byte string
        self.tex.setRamImage(buf) # overwrite the texture's RAM image with the new buffer
        cm = CardMaker("My Fullscreen Card")
        cm.setFrameFullscreenQuad()

        # Tell the CardMaker to create texture coordinates that take into
        # account the padding region of the texture.
        cm.setUvRange(self.tex)

        # Now place the card in the scene graph and apply the texture to it.

        self.card = NodePath(cm.generate())
        self.card.setTexture(self.tex)
        self.card.reparentTo(base.render2d)

        taskMgr.add(self.updateTex, 'video frame update')

    def updateTex(self, task):
        ret, frame = self.cap.read()
        h,w,d = frame.shape
        #frame = np.random.randint(0,255,(h,w,3)).astype(np.uint8)
        buf = frame[:,:,0].T.tostring() # take a single channel (blue, since OpenCV frames are BGR) as grayscale, transpose, convert to a byte string
        self.tex.setRamImage(buf) # overwrite the texture's RAM image with the new buffer
        
        return task.cont

player = MediaPlayer(0)
#print "got here 1"
player.run()

This simply gives me a white box on the screen, even when using the randomly generated frame. I can’t say I completely understand the transformation from an OpenCV frame to a Panda3D buffer, as I have taken this directly from an example.
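
For what it’s worth, here is my current (quite possibly wrong) understanding of what the conversion is supposed to do, written as a standalone sketch with a random frame standing in for the webcam; the (width, height) ordering and the BGR handling are guesses on my part:

import numpy as np
from panda3d.core import Texture

# stand-in for a webcam frame: OpenCV returns a (height, width, 3) BGR array
frame = np.random.randint(0, 255, (480, 640, 3)).astype(np.uint8)
h, w, _ = frame.shape

tex = Texture("webcam")
# setup2dTexture takes (x_size, y_size), i.e. (width, height)
tex.setup2dTexture(w, h, Texture.T_unsigned_byte, Texture.F_rgb)

# OpenCV frames are top-down, Panda3D RAM images are bottom-up, so flip vertically;
# setRamImageAs (if I understand it correctly) tells Panda the bytes are ordered BGR
tex.setRamImageAs(np.flipud(frame).tostring(), "BGR")
print tex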

I then tried using the Panda3D API to read directly from the camera, without going through OpenCV:

from pandac.PandaModules import WebcamVideo
from panda3d.core import MovieTexture, Texture, CardMaker, NodePath
from direct.showbase.ShowBase import ShowBase

class MediaPlayer(ShowBase):

    def __init__(self,cam_no):
        print "initialised"
        ShowBase.__init__(self)

        option = WebcamVideo.getOption(cam_no)  
        self.cursor = option.open() 
        self.tex = MovieTexture("myTexture")
        print "Init"
        print self.tex
        if self.cursor.ready:
            print "I'm here"
            status = self.cursor.setTime(0,0)
            print "buffer full = %s" % status
            self.cursor.setupTexture(self.tex)
            #buf = self.cursor.fetchBuffer()
            #print "Buffer = %s" % buf
            x = self.cursor.sizeX()
            y = self.cursor.sizeY()
            print "x= %d y=%d" % (x ,y)
            print "is streaming %s" % self.cursor.streaming()
            print "Aborted = %s" % self.cursor.aborted()
                    
        # Set up a fullscreen card to set the video texture on.

        cm = CardMaker("My Fullscreen Card")
        cm.setFrameFullscreenQuad()

        # Tell the CardMaker to create texture coordinates that take into
        # account the padding region of the texture.
        #cm.setUvRange(self.tex)

        # Now place the card in the scene graph and apply the texture to it.

        self.card = NodePath(cm.generate())
        self.card.setTexture(self.tex)
        self.card.reparentTo(base.render2d)

        taskMgr.add(self.updateTex, 'video frame update')

    def updateTex(self, task):
        #print "update called"
        if self.cursor.ready: # video input
            #self.cursor.setupTexture(self.tex)
            self.card.setTexture(self.tex)
            #print "new frame"
        return task.cont



player = MediaPlayer(0)
#print "got here 1"
player.run()

This gives me a completely black screen. The cursor reports that there is a frame ready, and the frame x and y sizes are correct. However, if I try to read from the buffer (with or without the setTime call), the cursor.fetchBuffer() call returns None, indicating that the camera has no data to return.

Has anybody ever managed to get a digital camera working using either of these methods? I’ve spent days on this and am about to give up, so any help would be very gratefully received; example code would be most useful, as I am learning both Python and the Panda3D API.

Thanks in advance, Paul.

Hopefully this works for you. I just put it together on Xubuntu 16.04, and it works with both OpenCVTexture and WebcamVideo. Pass one argument on the command line to choose the OpenCV camera input: 0 is the default camera, 1 is the second, 2 is the third, and so on. Press ESC to exit. To run it, copy the code to a file, e.g. WebCardNopenCVthingy.py, and run:

python WebCardNopenCVthingy.py

from panda3d.core import loadPrcFileData
from panda3d.vision import WebcamVideo
from panda3d.vision import OpenCVTexture
from panda3d.core import CardMaker, Point2, MovieTexture
import sys

loadPrcFileData("", "textures-power-2 none")
loadPrcFileData('', 'client-sleep 0.001')

from direct.showbase.ShowBase import ShowBase

if len(sys.argv) > 1: TEST = sys.argv[1]  # if a camera index is passed on the command line, that camera is loaded via OpenCVTexture
else: TEST = "WebcamVideo"
 
class MyApp(ShowBase):

    def __init__(self):

        ShowBase.__init__(self)

        print "TRYING WITH " + TEST 

        if TEST == "WebcamVideo":
            # using WebcamVideo
            option = WebcamVideo.getOption( 0 ) # 0 here is default webcam, 1 would be second cam etc.
            videoTexture = MovieTexture(option)
            videoTexture.setKeepRamImage(True)
            print "WebcamVideo based texture info: ->", videoTexture
        else:
            # using OpenCVTexture
            videoTexture = OpenCVTexture()
            videoTexture.fromCamera(int(TEST))
            print "OpenCV based texture info: -->", videoTexture
        
        cm = CardMaker("card")
        cm.setUvRange(Point2(0, 0), Point2(1, 1))
        cm.setFrame(-1, 1, -1, 1)
        card = self.render.attachNewNode(cm.generate())
        card.setTexture(videoTexture)
        card.setPos(0, 10, 0)
        
        self.accept('escape', lambda: sys.exit())
        
app = MyApp()
app.run()

…to run with OpenCVTexture:

python WebCardNopenCVthingy.py {webcam number} 

...where {webcam number} is the index of your camera: try 0 (zero) first, then 1 if you have a second camera.

Does anybody have a working solution?
None of the above worked for me.
The most recent one gave me this error:

  glxGraphicsPipe
(all display modules loaded.)
:device(error): Error adding inotify watch on /dev/input: No space left on device
TRYING WITH WebcamVideo
:vision(error): Failed to map buffer!
:vision(error): Failed to map buffer!
:vision(error): Failed to map buffer!
:vision(error): Failed to map buffer!
WebcamVideo based texture info: -> 2d_texture HD WebCam: HD WebCam
  2-d, 1280 x 720 pixels, each 3 bytes, rgb
  sampler wrap(u=repeat, v=repeat, w=repeat, border=0 0 0 1) filter(min=default, mag=default, aniso=0) lod(min=-1000, max=1000, bias=0)  2764800 bytes in ram, compression off

Segmentation fault (core dumped)

@Gaurav_Gola, I got the same error as you when running @neoniv’s code. For a separate project, I integrated OpenCV with Panda3D and was able to feed my webcam video into a Panda3D Texture. Make sure you have OpenCV installed and try running the following code. It works for me on Ubuntu 18.04.

import cv2
from panda3d.core import Texture, CardMaker
from direct.showbase.ShowBase import ShowBase

# open webcam
cap = cv2.VideoCapture(0)

# Check if the webcam is opened correctly
if not cap.isOpened():
    raise IOError("Cannot open webcam")

# use opencv to read from webcam
success = False
while not success:
    success, frame = cap.read()
h, w, _ = frame.shape  # frame.shape is (height, width, channels)

# setup panda3d scripting env (render, taskMgr, camera etc)
base = ShowBase()

# set up a texture for (h by w) rgb image
tex = Texture()
tex.setup2dTexture(w, h, Texture.T_unsigned_byte,
                   Texture.F_rgb)

# set up a card to apply the numpy texture
cm = CardMaker('card')
card = render.attachNewNode(cm.generate())

WIDTHRATIO = 1.0
HEIGHTRATIO = float(h) / w  # float division so this also works under Python 2
CAMDISTANCE = 1.5
DEPTH = 1
# card is square, rescale to the original image aspect ratio
card.setScale(WIDTHRATIO, DEPTH, HEIGHTRATIO)
# bring it to center, put it in front of camera
card.setPos(-WIDTHRATIO/2, CAMDISTANCE, -HEIGHTRATIO/2)


def updateTex(task):
    success, frame = cap.read()
    if success:
        # OpenCV frames are top-down while Panda3D RAM images are bottom-up,
        # so flip vertically (flip code -1 also mirrors the image horizontally)
        flipped_frame = cv2.flip(frame, -1)
        # overwriting the memory with new frame
        tex.setRamImage(flipped_frame)
        card.setTexture(tex)  # now apply it to the card

    return task.cont


taskMgr.add(updateTex, 'video frame update')

base.run()
cap.release()
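
If you also want 3D geometry composited over the video (which is what the original post was after), something along these lines, added just before base.run(), should do it. Note that "smiley" is one of the sample models that ships with the Panda3D SDK; substitute your own model if it is not on your model path:

# draw the video card first, in the background bin, and keep it from occluding 3D geometry
card.setBin("background", 0)
card.setDepthWrite(False)

# load a model and park it in front of the video card
# ("smiley" ships with the SDK -- swap in your own model if needed)
model = loader.loadModel("smiley")
model.reparentTo(render)
model.setScale(0.3)
model.setPos(0, 5, 0)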