Updated ndarray uid to use pointers. Updated frame publishing to recommend pyv4l2cam again after speed tests. Added spinwait for accurate framerate limiting. Added initial font work for glsl UI. Added scrolling for scaling. Added handling different numbers of channels. Made zmq dependency optional.

This commit is contained in:
2024-08-06 19:17:03 -07:00
parent 28f2f23358
commit 1f06999e5b
12 changed files with 377 additions and 94 deletions
+5 -1
View File
@@ -2,10 +2,14 @@
from collections.abc import Hashable
import numpy as np
def uid_for_source(video_source):
"""Get a uid for any source so it can be passed through the publisher-subscriber system."""
if len(str(video_source)) <= 1000:
if isinstance(video_source, np.ndarray):
uid = str(video_source.__array_interface__['data'][0]) # get array data pointer
elif len(str(video_source)) <= 1000:
uid = str(video_source)
elif isinstance(video_source, Hashable):
try:
View File
+17
View File
@@ -0,0 +1,17 @@
import os
if os.name == 'nt':
from get_fonts_windows import get_default_font_windows as get_default_font, list_fonts_windows as list_fonts
else:
from get_fonts_linux import get_default_font_linux as get_default_font, list_fonts_linux as list_fonts
def print_fonts():
    """Print every font path reported by the platform's list_fonts()."""
    print("Available fonts:")
    for font_path in list_fonts():
        print(font_path)
if __name__ == '__main__':
    # Smoke test: dump all available fonts, then show the platform default.
    print_fonts()
    default_font = get_default_font()
    print("Default font:", default_font)
+23
View File
@@ -0,0 +1,23 @@
import subprocess
def list_fonts_linux():
    """Return the file paths of all fonts known to fontconfig.

    Shells out to ``fc-list`` and returns one path per line of output.
    Returns an empty list when fc-list prints nothing — the previous
    ``strip().split('\n')`` returned ``['']`` in that case.
    """
    result = subprocess.run(['fc-list', '--format=%{file}\n'], stdout=subprocess.PIPE)
    # splitlines() yields [] for empty output and drops the trailing newline,
    # unlike split('\n').
    return result.stdout.decode().splitlines()
def get_default_font_linux():
    """Return the font file that fontconfig matches as the system default."""
    proc = subprocess.run(['fc-match', '--format=%{file}\n'], stdout=subprocess.PIPE)
    return proc.stdout.decode().strip()
if __name__ == '__main__':
    # Smoke test: enumerate fonts via fontconfig and show the default match.
    fonts = list_fonts_linux()
    print("Available fonts on Linux:")
    for font in fonts:
        print(font)
    default_font = get_default_font_linux()
    print("Default font on Linux:", default_font)
+23
View File
@@ -0,0 +1,23 @@
import glob
import os
def list_fonts_windows():
    """Return TrueType/OpenType font files found in the Windows fonts folder."""
    font_dir = r'C:\Windows\Fonts'
    # .ttf results first, then .otf, matching the extend-twice ordering.
    return [path
            for pattern in ('*.ttf', '*.otf')
            for path in glob.glob(os.path.join(font_dir, pattern))]
def get_default_font_windows():
    """Return the path of Arial, used as the stock Windows default font."""
    return 'C:\\Windows\\Fonts\\arial.ttf'
if __name__ == '__main__':
    # Smoke test: glob the Windows fonts folder and show the hard-coded default.
    fonts = list_fonts_windows()
    print("Available fonts on Windows:")
    for font in fonts:
        print(font)
    default_font = get_default_font_windows()
    print("Default font on Windows:", default_font)
+91
View File
@@ -0,0 +1,91 @@
from PIL import ImageFont, ImageDraw, Image
import numpy as np
from get_fonts import get_default_font
def load_font(ttf_path=None, font_size=32):
    """Load a TrueType font, falling back to the platform default face.

    ttf_path: font file to open; None selects get_default_font().
    font_size: pixel size passed to PIL's truetype loader.
    """
    path = get_default_font() if ttf_path is None else ttf_path
    return ImageFont.truetype(path, font_size)
def render_glyphs(font, characters):
    """Rasterize each character into its own grayscale PIL image.

    Returns (glyphs, max_width, max_height): glyphs maps char -> 'L'-mode
    image, and the max values are the largest glyph width/height seen.
    """
    glyphs = {}
    max_width = 0
    max_height = 0
    for ch in characters:
        left, _top, right, bottom = font.getbbox(ch)
        # Height is taken from the bbox bottom so glyphs share a baseline.
        w, h = right - left, bottom
        canvas = Image.new('L', size=(w, h))
        # Shift by -left so the ink starts at x=0 inside the canvas.
        ImageDraw.Draw(canvas).text((-left, 0), ch, font=font, fill=255)
        glyphs[ch] = canvas
        max_width = max(max_width, w)
        max_height = max(max_height, h)
    return glyphs, max_width, max_height
def create_texture_atlas(glyphs, max_width, max_height):
    """Pack rendered glyph images into a single grayscale atlas image.

    glyphs: dict mapping char -> PIL 'L' image (from render_glyphs).
    max_width/max_height: cell-size bounds used to size the atlas grid.

    Returns (atlas_image, glyph_data) where glyph_data maps each char to
    its (x, y, width, height) pixel rectangle inside the atlas.
    Raises ValueError if a glyph would not fit in the allocated height.
    """
    num_glyphs = len(glyphs)
    # The grid must hold every glyph. Using floor() for one axis could
    # under-allocate (e.g. 3 glyphs -> a 1-column x 2-row grid of 2 cells),
    # which spuriously raised ValueError; ceil on both axes guarantees
    # cols * rows >= num_glyphs.
    rows = int(np.ceil(np.sqrt(num_glyphs)))
    cols = int(np.ceil(np.sqrt(num_glyphs)))
    atlas_width = max_width * cols
    atlas_height = max_height * rows
    atlas_image = Image.new('L', (atlas_width, atlas_height))
    x_offset = 0
    y_offset = 0
    row_height = 0
    glyph_data = {}
    for char, glyph in glyphs.items():
        if x_offset + glyph.width > atlas_width:
            # Current row is full: wrap to the start of the next row.
            x_offset = 0
            y_offset += row_height
            row_height = 0
        # Check the glyph's bottom edge too; the old `y_offset > atlas_height`
        # test allowed a glyph to be pasted partially below the atlas and
        # silently clipped.
        if y_offset + glyph.height > atlas_height:
            raise ValueError("max_width and max_height are too small to contain all characters")
        atlas_image.paste(glyph, (x_offset, y_offset))
        glyph_data[char] = (x_offset, y_offset, glyph.width, glyph.height)
        x_offset += glyph.width
        row_height = max(row_height, glyph.height)
    # Trim unused rows at the bottom; the full width is kept so UVs computed
    # against atlas_image.width stay consistent.
    return atlas_image.crop((0, 0, atlas_width, y_offset + row_height)), glyph_data
def generate_glyph_metadata(glyph_data, atlas_width, atlas_height):
    """Convert pixel-space glyph rectangles into normalized UV metadata.

    glyph_data: dict mapping char -> (x, y, width, height) in atlas pixels.
    Returns a dict mapping char -> {'uv_coords': (u0, v0, u1, v1),
    'size': (width, height)}, with UVs normalized to [0, 1].
    """
    metadata = {}
    for char, (x, y, w, h) in glyph_data.items():
        u0, v0 = x / atlas_width, y / atlas_height
        u1, v1 = (x + w) / atlas_width, (y + h) / atlas_height
        metadata[char] = {'uv_coords': (u0, v0, u1, v1), 'size': (w, h)}
    return metadata
def create_font_texture_atlas(ttf_path=None, font_size=12, characters=None):
    """Build a texture atlas and per-glyph UV metadata for a font.

    ttf_path: font file to load; None selects the platform default font.
    font_size: pixel size to rasterize at.
    characters: iterable of characters to include; defaults to the first
        256 code points when None.

    Returns (atlas_texture, metadata): a numpy array suitable for uploading
    as an OpenGL texture, and the dict from generate_glyph_metadata.
    """
    # A literal list default would be one shared object across all calls
    # (mutable-default-argument pitfall); use the None sentinel instead.
    if characters is None:
        characters = [chr(i) for i in range(256)]
    font = load_font(ttf_path, font_size)
    glyphs, max_width, max_height = render_glyphs(font, characters)
    atlas_image, glyph_data = create_texture_atlas(glyphs, max_width, max_height)
    metadata = generate_glyph_metadata(glyph_data, atlas_image.width, atlas_image.height)
    # Convert the PIL atlas image to a numpy array for OpenGL upload.
    atlas_texture = np.array(atlas_image)
    return atlas_texture, metadata
if __name__ == '__main__':
    # Demo: build a 256-glyph atlas at 32px and show it with displayarray
    # until the window is closed.
    atlas_texture, metadata = create_font_texture_atlas(font_size=32,
                                                        characters=[chr(i) for i in range(256)])
    print("Texture atlas created with metadata:", metadata)
    from displayarray import DirectDisplay
    d = DirectDisplay()
    d.imshow("font", atlas_texture)
    while not d.window.is_closing:
        d.update()
+31 -14
View File
@@ -18,12 +18,12 @@ try:
using_pyv4l2cam = True
except ImportError:
pass
# while this is still good for raspberry pi, OpenCV tends to be faster for normal computers.
#warnings.warn("Could not import PyV4L2Cam on linux. Camera capture will be slow.")
#warnings.warn(
# "To install, run: pip install git+https://github.com/simleek/PyV4L2Cam.git"
#)
if sys.platform == "linux":
warnings.warn("Could not import PyV4L2Cam on linux. Camera capture will be slow.")
warnings.warn(
"To install, run: pip install git+https://github.com/simleek/PyV4L2Cam.git"
)
import numpy as np
@@ -36,6 +36,11 @@ from typing import Union, Tuple, Optional, Dict, Any, List, Callable
FrameCallable = Callable[[np.ndarray], Optional[np.ndarray]]
def spinwait_us(delay):
# thx: https://stackoverflow.com/a/74247651/782170
target = time.perf_counter_ns() + delay * 1000
while time.perf_counter_ns() < target:
pass
def pub_cam_loop_pyv4l2(
cam_id: Union[int, str, np.ndarray],
@@ -83,7 +88,7 @@ def pub_cam_loop_pyv4l2(
now = time.time()
while msg != "quit":
time.sleep(1.0 / (fps_limit - (time.time() - now)))
spinwait_us(1000000 / (fps_limit - (time.time() - now)))
now = time.time()
frame_bytes = cam.get_frame() # type: bytes
@@ -95,7 +100,11 @@ def pub_cam_loop_pyv4l2(
raise NotImplementedError(f"{cam.pixel_format} format not supported.")
if nd_frame is not None:
subscriber_dictionary.CV_CAMS_DICT[name].frame_pub.publish(nd_frame)
try:
subscriber_dictionary.CV_CAMS_DICT[name].frame_pub.publish(nd_frame)
except KeyError: # not sure why this happens, but I know I want it to exit correctly in this case
cam.close()
break
else:
cam.close()
subscriber_dictionary.CV_CAMS_DICT[name].status_pub.publish("failed")
@@ -162,24 +171,32 @@ def pub_cam_loop_opencv(
cam.set(cv2.CAP_PROP_FRAME_WIDTH, request_size[0])
cam.set(cv2.CAP_PROP_FRAME_HEIGHT, request_size[1])
count = cam.get(cv2.CAP_PROP_FRAME_COUNT)
if not cam.isOpened():
subscriber_dictionary.CV_CAMS_DICT[name].status_pub.publish("failed")
return False
now = time.time()
while msg != "quit":
time.sleep(1.0 / (fps_limit - (time.time() - now)))
now = time.time()
(ret, frame) = cam.read() # type: Tuple[bool, np.ndarray ]
if ret is False or not isinstance(frame, (np.ndarray, list)):
cam.release()
subscriber_dictionary.CV_CAMS_DICT[name].status_pub.publish("failed")
return False
if cam.get(cv2.CAP_PROP_FRAME_COUNT) > 0:
frame_counter += 1
if frame_counter >= cam.get(cv2.CAP_PROP_FRAME_COUNT):
if count>0: # sometimes mp4s just fail
frame_counter = 0
cam = cv2.VideoCapture(cam_id)
else:
subscriber_dictionary.CV_CAMS_DICT[name].status_pub.publish("failed")
return False
if count > 0:
frame_counter += 1
if frame_counter >= count-1:
frame_counter = 0
cam.release()
cam = cv2.VideoCapture(cam_id)
time2 = time.time()
#time.sleep(0.1 / (fps_limit - (time2 - now)))
spinwait_us(1000000 / (fps_limit - (time2 - now)))
now = time.time()
try:
subscriber_dictionary.CV_CAMS_DICT[name].frame_pub.publish(frame)
except KeyError: # we got deleted. Time to exit.
+43 -38
View File
@@ -1,49 +1,54 @@
"""Allow OpenCV to handle zmq subscriber addresses as input."""
import cv2
import zmq
from tensorcom.tenbin import decode_buffer # type: ignore
try:
import zmq
except ImportError:
class ZmqCam(object):
def __init__(self, img):
raise ImportError("Please install ZMQ")
else:
from tensorcom.tenbin import decode_buffer # type: ignore
class ZmqCam(object):
"""Add OpenCV camera controls to a numpy array."""
class ZmqCam(object):
"""Add OpenCV camera controls to a numpy array."""
def __init__(self, img):
"""Create a fake camera for OpenCV based on the initial array."""
assert isinstance(img, str)
s = img.split('#')
self.__ctx = zmq.Context()
self.__addr = s[0]
self.__sub = self.__ctx.socket(zmq.SUB)
if len(s) > 1:
self.__topic = bytes(s[1], 'ascii')
self.__sub.setsockopt(zmq.SUBSCRIBE, self.__topic)
else:
self.__topic = b""
self.__sub.connect(self.__addr)
def __init__(self, img):
"""Create a fake camera for OpenCV based on the initial array."""
assert isinstance(img, str)
s = img.split('#')
self.__ctx = zmq.Context()
self.__addr = s[0]
self.__sub = self.__ctx.socket(zmq.SUB)
if len(s) > 1:
self.__topic = bytes(s[1], 'ascii')
self.__sub.setsockopt(zmq.SUBSCRIBE, self.__topic)
else:
self.__topic = b""
self.__sub.connect(self.__addr)
self.__is_opened = True
self.__is_opened = True
def set(self, *args, **kwargs):
"""Set CAP_PROP_FRAME_WIDTH or CAP_PROP_FRAME_HEIGHT to scale a numpy array to that size."""
pass
def set(self, *args, **kwargs):
"""Set CAP_PROP_FRAME_WIDTH or CAP_PROP_FRAME_HEIGHT to scale a numpy array to that size."""
pass
@staticmethod
def get(*args, **kwargs):
"""Get OpenCV args. Currently only a fake CAP_PROP_FRAME_COUNT to fix detecting video ends."""
if args[0] == cv2.CAP_PROP_FRAME_COUNT:
return float("inf")
@staticmethod
def get(*args, **kwargs):
"""Get OpenCV args. Currently only a fake CAP_PROP_FRAME_COUNT to fix detecting video ends."""
if args[0] == cv2.CAP_PROP_FRAME_COUNT:
return float("inf")
def read(self):
"""Read back the numpy array in standard "did it work", "the array", OpenCV format."""
r = self.__sub.recv_multipart()
arrs = [decode_buffer(ri) for ri in r[1:]]
return True, arrs
def read(self):
"""Read back the numpy array in standard "did it work", "the array", OpenCV format."""
r = self.__sub.recv_multipart()
arrs = [decode_buffer(ri) for ri in r[1:]]
return True, arrs
def isOpened(self): # NOSONAR
"""Hack to tell OpenCV we're opened until we call release."""
return self.__is_opened
def isOpened(self): # NOSONAR
"""Hack to tell OpenCV we're opened until we call release."""
return self.__is_opened
def release(self):
"""Let OpenCV know we're finished."""
self.__is_opened = False
def release(self):
"""Let OpenCV know we're finished."""
self.__is_opened = False
+61 -14
View File
@@ -43,6 +43,35 @@ class MglWindowConfig(mgw.WindowConfig):
self.last_frame = frame
self.uibo.sel_lvl[0] = frame
def mouse_scroll_event(self, x_offset: float, y_offset: float):
if self.hit_buff is not None:
rect = self.rbuf.tex_levels[self.last_frame]['rect']
swap = self.rbuf.tex_levels[self.last_frame]['flags']&8
# Calculate current width and height of the rectangle
width = self.rbuf.tex_levels[self.last_frame]['width']
height = self.rbuf.tex_levels[self.last_frame]['height']
# Calculate scale factor (for example, 1% per scroll unit)
scale_factor = 0.1 * y_offset
# Calculate adjustment based on scale factor
width_adjustment = width * scale_factor
height_adjustment = height * scale_factor
if not swap:
# Adjust the top-left and bottom-right corners
rect[0] -= width_adjustment / 2 # Left
rect[1] -= height_adjustment / 2 # Top
rect[2] += width_adjustment / 2 # Right
rect[3] += height_adjustment / 2 # Bottom
else:
# Adjust the top-left and bottom-right corners
rect[0] -= width_adjustment / 2 # Left
rect[1] -= height_adjustment / 2 # Top
rect[2] += width_adjustment / 2 # Right
rect[3] += height_adjustment / 2 # Bottom
def mouse_press_event(self, x, y, button):
# sometimes mouse position event doesn't always trigger, so clicking can now help
if self.uibo is not None:
@@ -118,6 +147,11 @@ def create_no_input_texture(width=100, height=100):
return img
def pad_8_to_32(arr):
pad_len = -arr.size * np.dtype(arr.dtype).itemsize % np.dtype(np.float32).itemsize
if pad_len != 0:
arr = np.pad(arr.flatten(), (0, pad_len))
return arr.flatten().view(np.float32)
class InputTextureInfosUBO(object):
def __init__(self, start_textures=[]):
@@ -146,16 +180,20 @@ class InputTextureInfosUBO(object):
start_index = 0
else:
start_index = self.tex_levels[-1]['startIdx']+\
self.tex_levels[-1]['width']*self.tex_levels[-1]['height']*self.channels
self.tex_levels[-1]['width']*self.tex_levels[-1]['height']*self.tex_levels[-1]['channels']
width = img.shape[0]
height = img.shape[1]
assert img.shape[2] == self.channels
if len(img.shape)==2:
channels = 1
else:
channels = img.shape[2]
rect = [0,0,width,height]
self.tex_levels.append({
'startIdx': start_index,
'width': width,
'height': height,
'flags': flags,
'channels': channels,
'rect': rect
})
if isinstance(self.input_image, bytes):
@@ -163,25 +201,32 @@ class InputTextureInfosUBO(object):
#self.input_image = np.concatenate((self.input_image, img.flatten()), axis=0, dtype=self.input_image.dtype)
#self.input_image[start_index:] = img.flatten()
#self.input_image.extend(img.tobytes())
#img = pad_8_to_32(img)
self.input_image.append((img, start_index))
return i
def set_input_stream(self, i, img:np.ndarray, flags:int=0):
start_index = self.tex_levels[i]['startIdx']
end_index = start_index + img.shape[0] * img.shape[1] * self.channels
# end_index = start_index + img.shape[0] * img.shape[1] *
# assert img.shape[2] == self.channels
if len(img.shape)==2:
channels = 1
else:
channels = img.shape[2]
if i!=len(self.tex_levels) and \
self.tex_levels[i]['width']*self.tex_levels[i]['height']!=img.shape[0]*img.shape[1]:
ind = start_index
for j in range(i, len(self.tex_levels)):
new_start_index = ind + img.shape[0]*img.shape[1]*self.channels
new_start_index = ind + img.shape[0]*img.shape[1]*channels
self.tex_levels[j]['startIdx'] = new_start_index
ind = new_start_index
self.tex_levels[i]['width'] = img.shape[0]
self.tex_levels[i]['height'] = img.shape[1]
self.tex_levels[i]['flags'] = flags
self.tex_levels[i]['channels'] = channels
if isinstance(self.input_image, bytearray):
self.input_image = bytes(self.input_image)
@@ -198,17 +243,19 @@ class InputTextureInfosUBO(object):
# Mem.view(self.input_image)[start_index*4:end_index*4] = img.data
# an alternative would be to store a list of pointers to img.data or tobytes() and their sizes & offsets, then use write with offset for setting the buffer
#np.copyto(self.input_image[start_index:end_index], img.flat, casting='no')
#img = pad_8_to_32(img)
self.input_image[i] = (img, start_index)
def get_tex_data_buffer(self):
tex_data_bytes = bytearray()
tex_data_bytes.extend(struct.pack("<2ixxxxxxxx", self.channels, len(self.tex_levels)))
# glsl is alligned to vec4 or 128 bits or 32 bytes (32 xs)
tex_data_bytes.extend(struct.pack("<1ixxxxxxxxxxxx", len(self.tex_levels)))
for level in self.tex_levels:
tex_data_bytes.extend(struct.pack("<4i", level['startIdx'], level['width'], level['height'], level['flags'])) # todo: add 4th int holding flags (rgb order, w/h order)
tex_data_bytes.extend(struct.pack("<5i"+"x"*4*3, level['startIdx'], level['width'], level['height'], level['flags'], level['channels'])) # todo: add 4th int holding flags (rgb order, w/h order)
tex_data_bytes.extend(struct.pack("<4f", *level['rect']))
return bytes(tex_data_bytes)
def set_tex_data_buffer(self, data):
def set_tex_data_buffer(self, data): # todo: unusued, remove or update
if len(data) - 2 % 7 != 0:
raise ValueError("Input data size does not match buffer format")
self.channels = data[0]
@@ -219,7 +266,7 @@ class InputTextureInfosUBO(object):
self.tex_levels[i]['height'] = data[i * 7 + 4]
self.tex_levels[i]['rect'] = data[i * 7 + 5:i * 7 + 9]
def append_tex_data_buffer(self, data):
def append_tex_data_buffer(self, data): # todo: unusued, remove or update
if len(data) != 7:
raise ValueError("Input data size does not match buffer format")
self.tex_levels.append({
@@ -240,7 +287,7 @@ class InputTextureInfosUBO(object):
def get_input_image_buffer(self, writer):
for t in self.input_image:
img, start = t
writer(img.tobytes(), offset=start*4)
writer(img.tobytes(), offset=start)
#return bytes(self.input_image)
def set_input_image_buffer(self, data: np.ndarray):
@@ -323,7 +370,7 @@ class MglApp(object):
self.shader = self.ctx.program(vertex_shader=self.vertex_shader, fragment_shader=self.fragment_shader)
self.input_texture_ubo_buffer = self.ctx.buffer(reserve=4*1920*1080*4*3, dynamic=True)
self.input_texture_infos_ubo_buffer = self.ctx.buffer(reserve=4*30*7+4*2, dynamic=True)
self.input_texture_infos_ubo_buffer = self.ctx.buffer(reserve=4*30*9*4+4*2, dynamic=True)
self.user_input_ubo_buffer = self.ctx.buffer(self.user_input_ubo.to_bytes(), dynamic=False)
self.user_output_ubo_buffer = self.ctx.buffer(self.user_output_ubo.to_bytes(), dynamic=False)
@@ -422,10 +469,10 @@ class MglWindow(object):
self.window_names = {}
def imshow(self, window_name, frame):
if frame.dtype == np.uint8:
frame = frame.astype(np.float32) / 255
elif frame.dtype != np.float32:
frame = frame.astype(np.float32)
if frame.dtype in [np.float32, np.float64]:
frame = (frame*255).astype(np.uint8) # 0 to 1 to 0 to 255
elif frame.dtype not in [np.uint8, np.int8]:
frame = frame.astype(np.uint8)
if window_name in self.window_names.keys():
i = self.window_names[window_name]
+59 -20
View File
@@ -1,10 +1,13 @@
#version 430
//uniform sampler2D FontTexture;
struct TexLevel {
int startIdx;
int width;
int height;
int flags;
int channels;
vec4 rect; // 4 float representing position on triangle
};
@@ -14,11 +17,10 @@ struct TexLevel {
#define TEX_FLAG_BGR 1
layout(std430, binding = 0) buffer InputBuffer {
float inputImage[];
int inputImage[];
};
layout(std430, binding = 1) buffer TexData {
int channels;
int levels;
TexLevel texLevels[];
};
@@ -33,6 +35,13 @@ layout(std430, binding=3) buffer UserOutput {
vec2 hit_pos;
};
#define EXTRACT_UINT8_VALUE(value, index) \
(((value) >> ((index)<<3)) & 0xFFu)
#define EXTRACT_8_FROM_32_ARRAY(array, index) \
EXTRACT_UINT8_VALUE(array[index >> 2], index & 3)
#define EXTRACT_FLOAT_FROM_INT8_ARRAY(array, index) \
float(EXTRACT_8_FROM_32_ARRAY(array, index))/255
layout(origin_upper_left, pixel_center_integer) in vec4 gl_FragCoord;
layout(location = 0) out vec4 out_color;
@@ -42,6 +51,12 @@ float bilinearInterpolation(float x, float y, float bottomLeft, float bottomRigh
return mix(left, right, x);
}
/*vec4 print_char(vec2 p, int c)
{
if (p.x < .0 || p.x > 1. || p.y < 0. || p.y > 1.) return vec4(0, 0, 0, 1e5);
return textureGrad(FontTexture, p / 16. + fract(vec2(c, 15 - c / 16) / 16.), dFdx(p / 16.), dFdy(p / 16.));
}*/
void main() {
int our_level = -1;
float y_current = -1;
@@ -77,10 +92,10 @@ void main() {
y_current = int(levelHeight * (coord.y - texLevels[our_level].rect.y) / (texLevels[our_level].rect.w - texLevels[our_level].rect.y));
x_current = int(levelWidth * (coord.x - texLevels[our_level].rect.x) / (texLevels[our_level].rect.z - texLevels[our_level].rect.x));
int topLeftIdx = texLevels[our_level].startIdx + int(floor(x_current) * texLevels[our_level].height * channels + floor(y_current) * channels);
int topRightIdx = topLeftIdx + texLevels[our_level].height * channels;
int bottomLeftIdx = topLeftIdx + channels;
int bottomRightIdx = topRightIdx + channels;
int topLeftIdx = texLevels[our_level].startIdx + int(floor(x_current) * texLevels[our_level].height * texLevels[our_level].channels + floor(y_current) * texLevels[our_level].channels);
int topRightIdx = topLeftIdx + texLevels[our_level].height * texLevels[our_level].channels;
int bottomLeftIdx = topLeftIdx + texLevels[our_level].channels;
int bottomRightIdx = topRightIdx + texLevels[our_level].channels;
//leave this for visual debugging
out_color = vec4(float(y_current) / float(levelHeight), float(x_current) / float(levelWidth), 0.0, 1.0);
@@ -88,29 +103,31 @@ void main() {
out_color.x = bilinearInterpolation(
fract(x_current),
fract(y_current),
inputImage[bottomLeftIdx],
inputImage[bottomRightIdx],
inputImage[topLeftIdx],
inputImage[topRightIdx]
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomLeftIdx),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomRightIdx),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topLeftIdx),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topRightIdx)
);
if (channels > 1) {
if (texLevels[our_level].channels > 1) {
out_color.y = bilinearInterpolation(
fract(x_current),
fract(y_current),
inputImage[bottomLeftIdx + 1],
inputImage[bottomRightIdx + 1],
inputImage[topLeftIdx + 1],
inputImage[topRightIdx + 1]
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomLeftIdx + 1),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomRightIdx + 1),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topLeftIdx + 1),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topRightIdx + 1)
);
}else{
out_color.xyz = out_color.xxx;
}
if (channels > 2) {
if (texLevels[our_level].channels > 2) {
out_color.z = bilinearInterpolation(
fract(x_current),
fract(y_current),
inputImage[bottomLeftIdx + 2],
inputImage[bottomRightIdx + 2],
inputImage[topLeftIdx + 2],
inputImage[topRightIdx + 2]
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomLeftIdx + 2),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,bottomRightIdx + 2),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topLeftIdx + 2),
EXTRACT_FLOAT_FROM_INT8_ARRAY(inputImage,topRightIdx + 2)
);
}
if (bool(texLevels[our_level].flags & TEX_FLAG_BGR)) {
@@ -137,6 +154,28 @@ void main() {
) {
out_color = vec4(0.0, 0.5, 0.0, 1.0); // green selection border, on top of everything
}
/*vec2 textPos = vec2(texLevels[sel_level].rect.x + 5, texLevels[sel_level].rect.y + 5);
if (coord.x >= textPos.x && coord.y >= textPos.y &&
coord.x < textPos.x + 100 && coord.y < textPos.y + 16) // Text area dimensions
{
int window_name_ptr = string_ptrs[sel_level];
int end_ptr = string_ptrs[sel_level+1];
int char_index = 0;
vec2 uv = (coord - textPos);
float FontSize = 8.;
vec2 U = uv * 64.0 / FontSize;
vec4 O = vec4(0.0);
while (char_index < end_ptr) {
int char_code = strings[window_name_ptr+char_index];
if (char_code == 0) break; // Null terminator for string
U.x -= .5; O += print_char(U, char_code);
char_index++;
}
out_color = mix(out_color, O.xxxx, step(0.0, O.x)); // Blend text over the existing color
}*/
}
}
+8 -4
View File
@@ -1,12 +1,16 @@
from displayarray import display
import numpy as np
arr = np.random.normal(0.5, 0.1, (500, 500, 3))
arr = np.random.randint(0, 10, (1000, 900), dtype=np.uint8)
arr2 = np.random.randint(240, 255, (400, 500, 3), dtype=np.uint8)
arr3 = np.random.randint(127, 137, (250, 150, 3), dtype=np.uint8)
arr4 = np.random.randint(45, 55, (128, 28, 3), dtype=np.uint8)
arr5 = np.random.randint(190, 200, (32, 64, 3), dtype=np.uint8)
def fix_arr_cv(arr_in):
arr_in[:] += np.random.normal(0.01, 0.005, (500, 500, 3))
arr_in %= 1.0
arr_in[:] += np.random.randint(0, 2, arr_in.shape, dtype=np.uint8)
arr_in %= 255
display(arr, callbacks=fix_arr_cv, blocking=True)
display(*[arr, arr2, arr3, arr4, arr5], window_names=['1','2','3','4','5'], callbacks=fix_arr_cv, blocking=True)
+16 -3
View File
@@ -1,8 +1,21 @@
from displayarray import display
from displayarray import display, DirectDisplay
import numpy as np
arr = np.random.normal(0.5, 0.1, (100, 100, 5))
with display(0, size=(-1,-1)) as displayer:
with display(0, size=(9999,9999)) as displayer:
while displayer:
pass
import cv2
# proof opencv is slow
#d= DirectDisplay()
#size=(9999,9999)
#cam = cv2.VideoCapture(0)
#cam.set(cv2.CAP_PROP_FOURCC, cv2.CAP_OPENCV_MJPEG)
#cam.set(cv2.CAP_PROP_FRAME_WIDTH, size[0])
#cam.set(cv2.CAP_PROP_FRAME_HEIGHT, size[1])
#while not d.window.is_closing:
# ret, frame = cam.read()
# if frame is not None:
# d.imshow('cam', frame)
# d.update()