VideoTexture module.
The only build system that is known to work for sure is the MSVC project files. I've done my best to
update the other build systems, but I count on the community to check and fix them.
This is Zdeno Miklas' video texture plugin ported to trunk.
The original plugin API is maintained (it can be found at http://home.scarlet.be/~tsi46445/blender/blendVideoTex.html)
EXCEPT for the following:
The module name is changed to VideoTexture (instead of blendVideoTex).
A new (and currently only) video source is available: VideoFFmpeg()
It takes one required argument and up to four optional ones (named arguments are supported; a short construction sketch follows the parameter descriptions below):
VideoFFmpeg(file) : play a video file
VideoFFmpeg(file, capture, rate, width, height) : start a live video capture
file:
In the first form, file is a video file name, relative to the startup directory.
It can also be a URL; FFmpeg will happily stream a video from a network source.
In the second form, file is empty or is a hint for the format of the video capture.
On Windows, file is ignored and should be empty or not specified.
On Linux, FFmpeg supports two types of device: VideoForLinux and DV1394.
The user specifies the type of device with the file parameter:
[<device_type>][:<standard>]
<device_type> : 'v4l' for VideoForLinux, 'dv1394' for DV1394; defaults to 'v4l'
<standard> : 'pal', 'secam' or 'ntsc'; defaults to 'ntsc'
The driver name is constructed automatically from the device type:
v4l : /dev/video<capture>
dv1394: /dev/dv1394/<capture>
If you have a different driver name, you can specify it explicitly
instead of the device type. Examples of valid file parameters:
/dev/v4l/video0:pal
/dev/ieee1394/1:ntsc
dv1394:ntsc
v4l:pal
:secam
capture:
Defines the index number of the capture source, starting from 0. The first capture device is always 0.
The VideoTexture module knows that you want to start a live video capture when you set this parameter to a number >= 0. Setting this parameter to a value < 0 indicates video file playback. The default value is -1.
rate:
The capture frame rate, 25 frames/sec by default.
width:
height:
Width and height of the video capture in pixels, default value 0.
On Windows you must specify these values and they must match one of the capture device's supported resolutions.
For example, if you have a webcam that can capture at 160x120, 320x240 or 640x480,
you must specify one of these pairs of values or the opening of the video source will fail.
On Linux, default values are provided by the VideoForLinux driver if you don't specify width and height.
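For illustration, here are a few construction calls combining the parameters above. This is only a sketch: the device strings and the 640x480 webcam mode are assumptions for the example, not values taken from the demo.
import VideoTexture
# play a video file located next to the .blend
movie = VideoTexture.VideoFFmpeg('trailer_400p.ogg')
# capture from the first capture device at 30 frames/sec, 640x480
# (on Windows the file argument is empty and width/height must match a mode the device supports)
webcam = VideoTexture.VideoFFmpeg('', 0, 30, 640, 480)
# the same capture with named arguments, forcing the PAL VideoForLinux device on Linux
webcam_pal = VideoTexture.VideoFFmpeg(file='v4l:pal', capture=0, rate=25, width=640, height=480)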
Simple example
**************
1. Texture definition script:
import VideoTexture
contr = GameLogic.getCurrentController()
obj = contr.getOwner()
if not hasattr(GameLogic, 'video'):
    matID = VideoTexture.materialID(obj, 'MAVideoMat')
    GameLogic.video = VideoTexture.Texture(obj, matID)
    GameLogic.vidSrc = VideoTexture.VideoFFmpeg('trailer_400p.ogg')
    # Streaming is also possible:
    #GameLogic.vidSrc = VideoTexture.VideoFFmpeg('http://10.32.1.10/trailer_400p.ogg')
    GameLogic.vidSrc.repeat = -1
    # If the video dimensions are not a power of 2, scaling must be done before
    # sending the texture to the GPU. This is done by default with gluScaleImage()
    # but you can also use a faster, but less precise, scaling by setting scale
    # to True. Best approach is to convert the video offline and set the dimensions right.
    GameLogic.vidSrc.scale = True
    # FFmpeg always delivers the video image upside down, so flipping is enabled automatically
    #GameLogic.vidSrc.flip = True
if contr.getSensors()[0].isPositive():
    GameLogic.video.source = GameLogic.vidSrc
    GameLogic.vidSrc.play()
2. Texture refresh script:
obj = GameLogic.getCurrentController().getOwner()
if hasattr(GameLogic, 'video'):
    GameLogic.video.refresh(True)
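Note: the texture is only updated when refresh(True) is called, so this refresh script must run on every frame; in a typical setup (an assumption about the demo, not spelled out above) it is attached to an Always sensor with positive pulse mode.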
You can download this demo here:
http://home.scarlet.be/~tsi46445/blender/VideoTextureDemo.blend
http://home.scarlet.be/~tsi46445/blender/trailer_400p.ogg
/* $Id$
-----------------------------------------------------------------------------
This source file is part of blendTex library

Copyright (c) 2007 The Zdeno Ash Miklas

This program is free software; you can redistribute it and/or modify it under
the terms of the GNU Lesser General Public License as published by the Free Software
Foundation; either version 2 of the License, or (at your option) any later
version.

This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along with
this program; if not, write to the Free Software Foundation, Inc., 59 Temple
Place - Suite 330, Boston, MA 02111-1307, USA, or go to
http://www.gnu.org/copyleft/lesser.txt.
-----------------------------------------------------------------------------
*/

#if !defined IMAGEBASE_H
#define IMAGEBASE_H

#include "Common.h"

#include <vector>

#include <PyObjectPlus.h>
#include "PyTypeList.h"

#include "FilterBase.h"


// forward declarations
struct PyImage;
class ImageSource;


/// type for list of image sources
typedef std::vector<ImageSource*> ImageSourceList;

/// base class for image filters
class ImageBase
{
public:
    /// constructor
    ImageBase (bool staticSrc = false);
    /// destructor
    virtual ~ImageBase (void);
    /// release contained objects; if it returns true, the object should be deleted
    virtual bool release (void);

    /// get image
    unsigned int * getImage (unsigned int texId = 0);
    /// get image size
    short * getSize (void) { return m_size; }
    /// get image buffer size
    unsigned long getBuffSize (void)
    { return m_size[0] * m_size[1] * sizeof(unsigned int); }
    /// refresh image - invalidate its current content
    virtual void refresh (void);

    /// get scale
    bool getScale (void) { return m_scale; }
    /// set scale
    void setScale (bool scale) { m_scale = scale; m_scaleChange = true; }
    /// get vertical flip
    bool getFlip (void) { return m_flip; }
    /// set vertical flip
    void setFlip (bool flip) { m_flip = flip; }

    /// get source object
    PyImage * getSource (const char * id);
    /// set source object, return true if source was set
    bool setSource (const char * id, PyImage * source);

    /// get pixel filter
    PyFilter * getFilter (void) { return m_pyfilter; }
    /// set pixel filter
    void setFilter (PyFilter * filt);

    /// calculate size (nearest power of 2)
    static short calcSize (short size);

protected:
    /// image buffer
    unsigned int * m_image;
    /// image buffer size
    unsigned int m_imgSize;
    /// image size
    short m_size[2];
    /// image is available
    bool m_avail;

    /// scale image to power 2 sizes
    bool m_scale;
    /// scale was changed
    bool m_scaleChange;
    /// flip image vertically
    bool m_flip;

    /// source image list
    ImageSourceList m_sources;
    /// flag for disabling addition and deletion of sources
    bool m_staticSources;

    /// pixel filter
    PyFilter * m_pyfilter;

    /// initialize image data
    void init (short width, short height);

    /// find source
    ImageSourceList::iterator findSource (const char * id);

    /// create new source
    virtual ImageSource * newSource (const char * id) { return NULL; }

    /// check source sizes
    bool checkSourceSizes (void);

    /// calculate image from sources and set its availability
    virtual void calcImage (unsigned int texId) {}

    /// perform loop detection
    bool loopDetect (ImageBase * img);

    /// template for image conversion
    template<class FLT, class SRC> void convImage (FLT & filter, SRC srcBuff,
        short * srcSize)
    {
        // destination buffer
        unsigned int * dstBuff = m_image;
        // pixel size from filter
        unsigned int pixSize = filter.firstPixelSize();

        // if no scaling is needed
        if (srcSize[0] == m_size[0] && srcSize[1] == m_size[1])
            // if flipping isn't required
            if (!m_flip)
                // copy bitmap
                for (short y = 0; y < m_size[1]; ++y)
                    for (short x = 0; x < m_size[0]; ++x, ++dstBuff, srcBuff += pixSize)
                        // copy pixel
                        *dstBuff = filter.convert(srcBuff, x, y, srcSize, pixSize);
            // otherwise flip image top to bottom
            else
            {
                // go to last row of image
                srcBuff += srcSize[0] * (srcSize[1] - 1) * pixSize;
                // copy bitmap
                for (short y = m_size[1] - 1; y >= 0; --y, srcBuff -= 2 * srcSize[0] * pixSize)
                    for (short x = 0; x < m_size[0]; ++x, ++dstBuff, srcBuff += pixSize)
                        // copy pixel
                        *dstBuff = filter.convert(srcBuff, x, y, srcSize, pixSize);
            }
        // else scale picture (nearest neighbour)
        else
        {
            // interpolation accumulator
            int accHeight = srcSize[1] >> 1;
            // if flipping is required
            if (m_flip)
                // go to last row of image
                srcBuff += srcSize[0] * (srcSize[1] - 1) * pixSize;
            // process image rows
            for (int y = 0; y < srcSize[1]; ++y)
            {
                // increase height accum
                accHeight += m_size[1];
                // if pixel row has to be drawn
                if (accHeight >= srcSize[1])
                {
                    // decrease accum
                    accHeight -= srcSize[1];
                    // width accum
                    int accWidth = srcSize[0] >> 1;
                    // process row
                    for (int x = 0; x < srcSize[0]; ++x)
                    {
                        // increase width accum
                        accWidth += m_size[0];
                        // if pixel has to be drawn
                        if (accWidth >= srcSize[0])
                        {
                            // decrease accum
                            accWidth -= srcSize[0];
                            // convert pixel
                            *dstBuff = filter.convert(srcBuff, x, m_flip ? srcSize[1] - y - 1 : y,
                                srcSize, pixSize);
                            // next pixel
                            ++dstBuff;
                        }
                        // shift source pointer
                        srcBuff += pixSize;
                    }
                }
                // if pixel row will not be drawn
                else
                    // move source pointer to next row
                    srcBuff += pixSize * srcSize[0];
                // if y flipping is required
                if (m_flip)
                    // go to previous row of image
                    srcBuff -= 2 * pixSize * srcSize[0];
            }
        }
    }

    // template for specific filter preprocessing
    template <class F, class SRC> void filterImage (F & filt, SRC srcBuff, short * srcSize)
    {
        // find first filter in chain
        FilterBase * firstFilter = NULL;
        if (m_pyfilter != NULL) firstFilter = m_pyfilter->m_filter->findFirst();

        // if first filter is available
        if (firstFilter != NULL)
        {
            // python wrapper for filter
            PyFilter pyFilt;
            pyFilt.m_filter = &filt;
            // set specified filter as first in chain
            firstFilter->setPrevious(&pyFilt, false);
            // convert video image
            convImage(*(m_pyfilter->m_filter), srcBuff, srcSize);
            // delete added filter
            firstFilter->setPrevious(NULL, false);
        }
        // otherwise use given filter for conversion
        else convImage(filt, srcBuff, srcSize);
        // source was processed
        m_avail = true;
    }
};


// python structure for image filter
struct PyImage
{
    PyObject_HEAD
    // source object
    ImageBase * m_image;
};


// size of id
const int SourceIdSize = 32;

/// class for source of image
class ImageSource
{
public:
    /// constructor
    ImageSource (const char * id);
    /// destructor
    virtual ~ImageSource (void);

    /// get id
    const char * getId (void) { return m_id; }
    /// compare id to argument
    bool is (const char * id);

    /// get source object
    PyImage * getSource (void) { return m_source; }
    /// set source object
    void setSource (PyImage * source);

    /// get image from source
    unsigned int * getImage (void);
    /// get buffered image
    unsigned int * getImageBuf (void) { return m_image; }
    /// refresh source
    void refresh (void);

    /// get image size
    short * getSize (void)
    {
        static short defSize [] = {0, 0};
        return m_source != NULL ? m_source->m_image->getSize() : defSize;
    }

protected:
    /// id of source
    char m_id [SourceIdSize];
    /// pointer to source structure
    PyImage * m_source;
    /// buffered image from source
    unsigned int * m_image;

private:
    /// default constructor is forbidden
    ImageSource (void) {}
};

// list of python image types
extern PyTypeList pyImageTypes;


// functions for python interface

// object initialization
template <class T> static int Image_init (PyObject * pySelf, PyObject * args, PyObject * kwds)
{
    PyImage * self = reinterpret_cast<PyImage*>(pySelf);
    // create source object
    if (self->m_image != NULL) delete self->m_image;
    self->m_image = new T();
    // initialization succeeded
    return 0;
}

// object allocation
PyObject * Image_allocNew (PyTypeObject * type, PyObject * args, PyObject * kwds);
// object deallocation
void Image_dealloc (PyImage * self);

// get image data
PyObject * Image_getImage (PyImage * self, void * closure);
// get image size
PyObject * Image_getSize (PyImage * self, void * closure);
// refresh image - invalidate current content
PyObject * Image_refresh (PyImage * self);

// get scale
PyObject * Image_getScale (PyImage * self, void * closure);
// set scale
int Image_setScale (PyImage * self, PyObject * value, void * closure);
// get flip
PyObject * Image_getFlip (PyImage * self, void * closure);
// set flip
int Image_setFlip (PyImage * self, PyObject * value, void * closure);

// get filter source object
PyObject * Image_getSource (PyImage * self, PyObject * args);
// set filter source object
PyObject * Image_setSource (PyImage * self, PyObject * args);

// get pixel filter object
PyObject * Image_getFilter (PyImage * self, void * closure);
// set pixel filter object
int Image_setFilter (PyImage * self, PyObject * value, void * closure);

#endif