VideoTexture: fix RGB/BGR confusion, make code compatible with big endian CPU, add RGBA source filter.

This commit is contained in:
Benoit Bolsee 2008-11-04 12:04:59 +00:00
parent 6eb3bf53dd
commit 1886b7bf52
13 changed files with 162 additions and 69 deletions

@ -29,6 +29,12 @@ http://www.gnu.org/copyleft/lesser.txt.
#include "PyTypeList.h"
// Endian-safe access to the color components of a 32-bit pixel.  Pixels are
// kept in *memory byte order* R,G,B,A (byte 0 = red), so the same code works
// on little- and big-endian CPUs — never shift/mask the int directly.
// VT_C(v,idx): lvalue for component idx of pixel v (0=R, 1=G, 2=B, 3=A).
// All macro arguments are fully parenthesized so compound expressions
// (e.g. "x * y") are cast/indexed as a whole, not just their first token.
#define VT_C(v,idx) (((unsigned char*)&(v))[idx])
#define VT_R(v) VT_C(v,0)
#define VT_G(v) VT_C(v,1)
#define VT_B(v) VT_C(v,2)
#define VT_A(v) VT_C(v,3)
// VT_RGBA(v,r,g,b,a): set all four components of pixel v at once; each value
// is truncated to a byte after full evaluation of its expression.
#define VT_RGBA(v,r,g,b,a) (VT_R(v)=(unsigned char)(r), VT_G(v)=(unsigned char)(g), VT_B(v)=(unsigned char)(b), VT_A(v)=(unsigned char)(a))
// forward declaration
class FilterBase;

@ -63,25 +63,24 @@ protected:
short * size, unsigned int pixSize, unsigned int val)
{
// calculate differences
int difRed = int((val >> 16) & 0xFF) - int(m_color[0]);
int difGreen = int((val >> 8) & 0xFF) - int(m_color[1]);
int difBlue = int(val & 0xFF) - int(m_color[2]);
int difRed = int(VT_R(val)) - int(m_color[0]);
int difGreen = int(VT_G(val)) - int(m_color[1]);
int difBlue = int(VT_B(val)) - int(m_color[2]);
// calc distance from "blue screen" color
unsigned int dist = (unsigned int)(difRed * difRed + difGreen * difGreen
+ difBlue * difBlue);
// condition for fully transparent color
if (m_squareLimits[0] >= dist)
// return color with zero alpha
//return 0xFF000000;
return val & 0x00FFFFFF;
VT_A(val) = 0;
// condition for fully opaque color
else if (m_squareLimits[1] <= dist)
// return normal colour
return val | 0xFF000000;
VT_A(val) = 0xFF;
// otherwise calc alpha
else
return (val & 0x00FFFFFF) | ((((dist - m_squareLimits[0]) << 8)
/ m_limitDist) << 24);
VT_A(val) = (((dist - m_squareLimits[0]) << 8) / m_limitDist);
return val;
}
/// virtual filtering function for byte source

@ -223,7 +223,7 @@ FilterLevel::FilterLevel (void)
for (int r = 0; r < 4; ++r)
{
levels[r][0] = 0;
levels[r][1] = 0xFF << (r << 3);
levels[r][1] = 0xFF;
levels[r][2] = 0xFF;
}
}
@ -235,7 +235,7 @@ void FilterLevel::setLevels (ColorLevel & lev)
for (int r = 0; r < 4; ++r)
{
for (int c = 0; c < 2; ++c)
levels[r][c] = lev[r][c] << (r << 3);
levels[r][c] = lev[r][c];
levels[r][2] = lev[r][0] < lev[r][1] ? lev[r][1] - lev[r][0] : 1;
}
}
@ -252,9 +252,9 @@ inline FilterLevel * getFilterLevel (PyFilter * self)
static PyObject * getLevels (PyFilter * self, void * closure)
{
ColorLevel & lev = getFilterLevel(self)->getLevels();
return Py_BuildValue("((kk)(kk)(kk)(kk))",
lev[0][0], lev[0][1], lev[1][0] >> 8, lev[1][1] >> 8,
lev[2][0] >> 16, lev[2][1] >> 16, lev[3][0] >> 24, lev[3][1] >> 24);
return Py_BuildValue("((HH)(HH)(HH)(HH))",
lev[0][0], lev[0][1], lev[1][0], lev[1][1],
lev[2][0], lev[2][1], lev[3][0], lev[3][1]);
}
// set color levels
@ -279,7 +279,7 @@ static int setLevels (PyFilter * self, PyObject * value, void * closure)
valid = PyInt_Check(PySequence_Fast_GET_ITEM(row, c));
// if it is valid, save it in matrix
if (valid)
lev[r][c] = (unsigned long)(PyInt_AsLong(PySequence_Fast_GET_ITEM(row, c)));
lev[r][c] = (unsigned short)(PyInt_AsLong(PySequence_Fast_GET_ITEM(row, c)));
}
}
// if parameter is not valid, report error

@ -43,10 +43,13 @@ protected:
short * size, unsigned int pixSize, unsigned int val)
{
// calculate gray value
unsigned int gray = (28 * ((val >> 16) & 0xFF) + 151 * ((val >> 8) & 0xFF)
+ 77 * (val & 0xFF)) & 0xFF00;
unsigned int gray = (28 * (VT_B(val)) + 151 * (VT_G(val))
+ 77 * (VT_R(val))) >> 8;
// return gray scale value
return (val & 0xFF000000) | gray << 8 | gray | gray >> 8;
VT_R(val) = gray;
VT_G(val) = gray;
VT_B(val) = gray;
return val;
}
/// virtual filtering function for byte source
@ -82,11 +85,11 @@ protected:
ColorMatrix m_matrix;
/// calculate one color component
unsigned int calcColor (unsigned int val, short idx)
unsigned char calcColor (unsigned int val, short idx)
{
return (((m_matrix[idx][0] * (val & 0xFF) + m_matrix[idx][1] * ((val >> 8) & 0xFF)
+ m_matrix[idx][2] * ((val >> 16) & 0xFF) + m_matrix[idx][3] * ((val >> 24) & 0xFF)
+ m_matrix[idx][4]) >> 8) & 0xFF) << (idx << 3);
return (((m_matrix[idx][0] * (VT_R(val)) + m_matrix[idx][1] * (VT_G(val))
+ m_matrix[idx][2] * (VT_B(val)) + m_matrix[idx][3] * (VT_A(val))
+ m_matrix[idx][4]) >> 8) & 0xFF);
}
/// filter pixel template, source int buffer
@ -94,8 +97,9 @@ protected:
short * size, unsigned int pixSize, unsigned int val)
{
// return calculated color
return calcColor(val, 0) | calcColor(val, 1) | calcColor(val, 2)
| calcColor(val, 3);
int color;
VT_RGBA(color, calcColor(val, 0), calcColor(val, 1), calcColor(val, 2), calcColor(val, 3));
return color;
}
/// virtual filtering function for byte source
@ -110,7 +114,7 @@ protected:
/// type for color levels
typedef unsigned long ColorLevel[4][3];
typedef unsigned short ColorLevel[4][3];
/// pixel filter for color calculation
class FilterLevel : public FilterBase
@ -133,11 +137,10 @@ protected:
/// calculate one color component
unsigned int calcColor (unsigned int val, short idx)
{
unsigned int col = val & (0xFF << (idx << 3));
unsigned int col = VT_C(val,idx);;
if (col <= levels[idx][0]) col = 0;
else if (col >= levels[idx][1]) col = 0xFF << (idx << 3);
else if (idx < 3) col = (((col - levels[idx][0]) << 8) / levels[idx][2]) & (0xFF << (idx << 3));
else col = (((col - levels[idx][0]) / levels[idx][2]) << 8) & (0xFF << (idx << 3));
else if (col >= levels[idx][1]) col = 0xFF;
else col = (((col - levels[idx][0]) << 8) / levels[idx][2]) & 0xFF;
return col;
}
@ -146,8 +149,9 @@ protected:
short * size, unsigned int pixSize, unsigned int val)
{
// return calculated color
return calcColor(val, 0) | calcColor(val, 1) | calcColor(val, 2)
| calcColor(val, 3);
int color;
VT_RGBA(color, calcColor(val, 0), calcColor(val, 1), calcColor(val, 2), calcColor(val, 3));
return color;
}
/// virtual filtering function for byte source

@ -32,7 +32,7 @@ http://www.gnu.org/copyleft/lesser.txt.
// implementation FilterNormal
// constructor
FilterNormal::FilterNormal (void) : m_colShift(0)
FilterNormal::FilterNormal (void) : m_colIdx(0)
{
// set default depth
setDepth(4);
@ -44,7 +44,7 @@ void FilterNormal::setColor (unsigned short colIdx)
// check validity of index
if (colIdx < 3)
// set color shift
m_colShift = colIdx << 3;
m_colIdx = colIdx;
}
// set depth

@ -43,7 +43,7 @@ public:
virtual ~FilterNormal (void) {}
/// get index of color used to calculate normals
unsigned short getColor (void) { return m_colShift >> 3; }
unsigned short getColor (void) { return m_colIdx; }
/// set index of color used to calculate normals
void setColor (unsigned short colIdx);
@ -58,20 +58,28 @@ protected:
/// scale to calculate normals
float m_depthScale;
/// shift to used color component
unsigned short m_colShift;
/// color index, 0=red, 1=green, 2=blue, 3=alpha
unsigned short m_colIdx;
/// filter pixel, source int buffer
template <class SRC> unsigned int tFilter (SRC * src, short x, short y,
short * size, unsigned int pixSize, unsigned int val = 0)
{
// get value of required color
int actPix = int((val >> m_colShift) & 0xFF);
int actPix = int(VT_C(val,m_colIdx));
int upPix = actPix;
int leftPix = actPix;
// get upper and left pixel from actual pixel
int upPix = y > 0 ? int((convertPrevious(src - pixSize * size[0], x, y - 1,
size, pixSize) >> m_colShift) & 0xFF) : actPix;
int leftPix = x > 0 ? int((convertPrevious(src - pixSize, x - 1, y, size, pixSize)
>> m_colShift) & 0xFF) : actPix;
if (y > 0)
{
val = convertPrevious(src - pixSize * size[0], x, y - 1, size, pixSize);
upPix = VT_C(val,m_colIdx);
}
if (x > 0)
{
val = convertPrevious(src - pixSize, x - 1, y, size, pixSize);
leftPix = VT_C(val,m_colIdx);
}
// height differences (from blue color)
float dx = (actPix - leftPix) * m_depthScale;
float dy = (actPix - upPix) * m_depthScale;
@ -81,7 +89,8 @@ protected:
dy = dy * dz + normScaleKoef;
dz += normScaleKoef;
// return normal vector converted to color
return 0xFF000000 | int(dz) << 16 | int(dy) << 8 | int(dx);
VT_RGBA(val, dx, dy, dz, 0xFF);
return val;
}
/// filter pixel, source byte buffer

@ -77,6 +77,52 @@ PyTypeObject FilterRGB24Type =
Filter_allocNew, /* tp_new */
};
// FilterRGBA32
// define python type
// Python type table for the FilterRGBA32 source filter (Python 2 C API:
// PyObject_HEAD_INIT(NULL) with an explicit ob_size slot).  Mirrors the
// FilterRGB24Type table above; slots this filter does not need stay 0/NULL.
// Instances are plain PyFilter objects whose C++ payload is constructed by
// Filter_init<FilterRGBA32> in tp_init.
PyTypeObject FilterRGBA32Type =
{
PyObject_HEAD_INIT(NULL)
0, /*ob_size*/
"VideoTexture.FilterRGBA32", /*tp_name*/
sizeof(PyFilter), /*tp_basicsize*/
0, /*tp_itemsize*/
(destructor)Filter_dealloc,/*tp_dealloc*/
0, /*tp_print*/
0, /*tp_getattr*/
0, /*tp_setattr*/
0, /*tp_compare*/
0, /*tp_repr*/
0, /*tp_as_number*/
0, /*tp_as_sequence*/
0, /*tp_as_mapping*/
0, /*tp_hash */
0, /*tp_call*/
0, /*tp_str*/
0, /*tp_getattro*/
0, /*tp_setattro*/
0, /*tp_as_buffer*/
Py_TPFLAGS_DEFAULT, /*tp_flags*/
"Source filter RGBA32 objects", /* tp_doc */
0, /* tp_traverse */
0, /* tp_clear */
0, /* tp_richcompare */
0, /* tp_weaklistoffset */
0, /* tp_iter */
0, /* tp_iternext */
NULL, /* tp_methods */
0, /* tp_members */
NULL, /* tp_getset */
0, /* tp_base */
0, /* tp_dict */
0, /* tp_descr_get */
0, /* tp_descr_set */
0, /* tp_dictoffset */
(initproc)Filter_init<FilterRGBA32>, /* tp_init */
0, /* tp_alloc */
Filter_allocNew, /* tp_new */
};
// FilterBGR24
// define python type

@ -44,7 +44,34 @@ protected:
/// filter pixel, source byte buffer
virtual unsigned int filter (unsigned char * src, short x, short y,
short * size, unsigned int pixSize, unsigned int val)
{ return 0xFF000000 | src[0] << 16 | src[1] << 8 | src[2]; }
{ VT_RGBA(val,src[0],src[1],src[2],0xFF); return val; }
};
/// class for RGBA32 conversion
// Source filter that passes 4-byte RGBA pixels through unchanged; used when
// the source buffer already matches the internal R,G,B,A memory byte order.
class FilterRGBA32 : public FilterBase
{
public:
/// constructor
FilterRGBA32 (void) {}
/// destructor
virtual ~FilterRGBA32 (void) {}
/// get source pixel size (4 bytes per source pixel)
virtual unsigned int getPixelSize (void) { return 4; }
protected:
/// filter pixel, source byte buffer; returns the pixel as a 32-bit value
/// whose memory bytes are src[0..3] (same result on either endianness)
virtual unsigned int filter (unsigned char * src, short x, short y,
short * size, unsigned int pixSize, unsigned int val)
{
// fast path: 4-byte-aligned source can be loaded with one 32-bit read
// NOTE(review): *(unsigned int*)src is a type-punning read — a strict-
// aliasing gray area; a memcpy-based load would be the portable form.
// Confirm the build's compiler flags tolerate this cast.
if ((intptr_t(src)&0x3) == 0)
return *(unsigned int*)src;
else
{
// unaligned source: assemble the pixel byte by byte; yields the same
// 32-bit value as the direct load above
VT_RGBA(val,src[0],src[1],src[2],src[3]);
return val;
}
}
};
/// class for BGR24 conversion
@ -63,7 +90,7 @@ protected:
/// filter pixel, source byte buffer
virtual unsigned int filter (unsigned char * src, short x, short y,
short * size, unsigned int pixSize, unsigned int val)
{ return 0xFF000000 | src[2] << 16 | src[1] << 8 | src[0]; }
{ VT_RGBA(val,src[2],src[1],src[0],0xFF); return val; }
};
/// class for YV12 conversion
@ -215,15 +242,15 @@ protected:
int red = (298 * c + 409 * e + 128) >> 8;
if (red >= 0x100) red = 0xFF;
else if (red < 0) red = 0;
int green = 298 * c - 100 * d - 208 * e;
if (green > 0x10000) green = 0xFF00;
int green = (298 * c - 100 * d - 208 * e) >> 8;
if (green >= 0x100) green = 0xFF;
else if (green < 0) green = 0;
int blue = (298 * c + 516 * d + 128) << 8;
if (blue > 0x1000000) blue = 0xFF0000;
int blue = (298 * c + 516 * d + 128) >> 8;
if (blue >= 0x100) blue = 0xFF;
else if (blue < 0) blue = 0;
// return result
return 0xFF000000 | blue & 0xFF0000 | green & 0xFF00
| red & 0xFF;
VT_RGBA(val, red, green, blue, 0xFF);
return val;
}
};

@ -32,7 +32,7 @@ http://www.gnu.org/copyleft/lesser.txt.
// default filter
FilterBGR24 defFilter;
FilterRGB24 defFilter;
// load image from buffer

@ -135,7 +135,7 @@ void ImageViewport::calcImage (unsigned int texId)
glReadPixels(m_upLeft[0], m_upLeft[1], (GLsizei)m_capSize[0], (GLsizei)m_capSize[1], GL_RGB,
GL_UNSIGNED_BYTE, m_viewportImage);
// filter loaded data
FilterBGR24 filt;
FilterRGB24 filt;
filterImage(filt, m_viewportImage, m_capSize);
}
}

@ -51,7 +51,7 @@ extern "C" void do_init_ffmpeg();
// constructor
VideoFFmpeg::VideoFFmpeg (HRESULT * hRslt) : VideoBase(),
m_codec(NULL), m_formatCtx(NULL), m_codecCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameBGR(NULL), m_imgConvertCtx(NULL),
m_frame(NULL), m_frameDeinterlaced(NULL), m_frameRGB(NULL), m_imgConvertCtx(NULL),
m_deinterlace(false), m_preseek(0), m_videoStream(-1), m_baseFrameRate(25.0),
m_lastFrame(-1), m_curPosition(-1), m_startTime(0),
m_captWidth(0), m_captHeight(0), m_captRate(0.f)
@ -91,10 +91,10 @@ bool VideoFFmpeg::release()
MEM_freeN(m_frameDeinterlaced->data[0]);
av_free(m_frameDeinterlaced);
}
if (m_frameBGR)
if (m_frameRGB)
{
MEM_freeN(m_frameBGR->data[0]);
av_free(m_frameBGR);
MEM_freeN(m_frameRGB->data[0]);
av_free(m_frameRGB);
}
if (m_imgConvertCtx)
{
@ -106,7 +106,7 @@ bool VideoFFmpeg::release()
m_formatCtx = NULL;
m_frame = NULL;
m_frame = NULL;
m_frameBGR = NULL;
m_frameRGB = NULL;
m_imgConvertCtx = NULL;
// object will be deleted after that
@ -189,7 +189,7 @@ int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AV
m_videoStream = videoStream;
m_frame = avcodec_alloc_frame();
m_frameDeinterlaced = avcodec_alloc_frame();
m_frameBGR = avcodec_alloc_frame();
m_frameRGB = avcodec_alloc_frame();
// allocate buffer if deinterlacing is required
@ -201,12 +201,12 @@ int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AV
m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);
// allocate buffer to store final decoded frame
avpicture_fill((AVPicture*)m_frameBGR,
avpicture_fill((AVPicture*)m_frameRGB,
(uint8_t*)MEM_callocN(avpicture_get_size(
PIX_FMT_BGR24,
PIX_FMT_RGB24,
m_codecCtx->width, m_codecCtx->height),
"ffmpeg bgr"),
PIX_FMT_BGR24, m_codecCtx->width, m_codecCtx->height);
"ffmpeg rgb"),
PIX_FMT_RGB24, m_codecCtx->width, m_codecCtx->height);
// allocate sws context
m_imgConvertCtx = sws_getContext(
m_codecCtx->width,
@ -214,7 +214,7 @@ int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AV
m_codecCtx->pix_fmt,
m_codecCtx->width,
m_codecCtx->height,
PIX_FMT_BGR24,
PIX_FMT_RGB24,
SWS_FAST_BILINEAR,
NULL, NULL, NULL);
@ -224,8 +224,8 @@ int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AV
av_free(m_frame);
MEM_freeN(m_frameDeinterlaced->data[0]);
av_free(m_frameDeinterlaced);
MEM_freeN(m_frameBGR->data[0]);
av_free(m_frameBGR);
MEM_freeN(m_frameRGB->data[0]);
av_free(m_frameRGB);
return -1;
}
return 0;
@ -565,14 +565,14 @@ bool VideoFFmpeg::grabFrame(long position)
input = m_frameDeinterlaced;
}
}
// convert to BGR24
// convert to RGB24
sws_scale(m_imgConvertCtx,
input->data,
input->linesize,
0,
m_codecCtx->height,
m_frameBGR->data,
m_frameBGR->linesize);
m_frameRGB->data,
m_frameRGB->linesize);
av_free_packet(&packet);
frameLoaded = true;
break;

@ -100,7 +100,7 @@ protected:
// deinterlaced frame if codec requires it
AVFrame *m_frameDeinterlaced;
// decoded RGB24 frame if codec requires it
AVFrame *m_frameBGR;
AVFrame *m_frameRGB;
// conversion from raw to RGB is done with sws_scale
struct SwsContext *m_imgConvertCtx;
// should the codec be deinterlaced?
@ -150,7 +150,7 @@ protected:
bool grabFrame(long frame);
/// return the frame in RGB24 format, the image data is found in AVFrame.data[0]
AVFrame* getFrame(void) { return m_frameBGR; }
AVFrame* getFrame(void) { return m_frameRGB; }
};
inline VideoFFmpeg * getFFmpeg (PyImage * self)

@ -126,6 +126,7 @@ extern PyTypeObject FilterColorType;
extern PyTypeObject FilterLevelType;
extern PyTypeObject FilterNormalType;
extern PyTypeObject FilterRGB24Type;
extern PyTypeObject FilterRGBA32Type;
extern PyTypeObject FilterBGR24Type;
extern PyTypeObject ImageBuffType;
extern PyTypeObject ImageMixType;
@ -150,6 +151,7 @@ static void registerAllTypes(void)
pyFilterTypes.add(&FilterLevelType, "FilterLevel");
pyFilterTypes.add(&FilterNormalType, "FilterNormal");
pyFilterTypes.add(&FilterRGB24Type, "FilterRGB24");
pyFilterTypes.add(&FilterRGBA32Type, "FilterRGBA32");
pyFilterTypes.add(&FilterBGR24Type, "FilterBGR24");
}