
Actual greenscreen function added

master · kaqu · 10 months ago
commit 207d69dc89
8 changed files with 447 additions and 209 deletions
  1. README.md            +4    -0
  2. addimage.py          +10   -9
  3. background.py        +113  -26
  4. funcam.py            +171  -160
  5. getvideobyname.py    +10   -9
  6. getvirtualvideo.py   +4    -3
  7. kbhit.py             +130  -0
  8. virtcam.sh           +5    -2

README.md  +4 -0

@@ -9,6 +9,10 @@ I added a functioning one (store as /etc/akvcam/config.ini - w/ root rights!).
Also: You will need a 3rd gen. Intel i7/equivalent or better ...
22.06.20 New/improved features
Actual greenscreen may now be used (incorporates auto-adjust & keyboard manipulation)
27.05.20 New/improved features
Logo turning effect now optional


addimage.py  +10 -9

@@ -8,25 +8,26 @@ Add a PNG image to a frame
History:
--------
25.05.20/KQ Initial (modularized) version
27.05.20/KQ Variable naming corrected
"""
import numpy as np
import cv2
import sys
# My own new version w/ transparency effect
# My own new version w/ transparency effect (& reasonable performance)
def overlay_png(frame, x_offset, y_offset, png, alpha_mask):
"""Overlay a png on an OpenCV frame w/ transparency effect
"""Overlay a png on an OpenCV frame @x_offset/y_offset w/ transparency effect based on png alpha channel
"""
scr_height = png.shape[0]
scr_width = png.shape[1]
_scr_height = png.shape[0]
_scr_width = png.shape[1]
frame_roi = frame[y_offset:y_offset+scr_height, x_offset:x_offset+scr_width,0:3] # Source original region
all_square_bg_fg = cv2.bitwise_or(frame_roi>>2,png,mask=alpha_mask) # Both added, background somewhat reduced ...
mask_inv = cv2.bitwise_not(alpha_mask) # Negate mask (inverse)
surrounding_bg = cv2.bitwise_or(frame_roi,png,mask=mask_inv) # This yields the original background at transparent regions
frame[y_offset:y_offset+scr_height, x_offset:x_offset+scr_width,0:3] = all_square_bg_fg + surrounding_bg # Now add both: Transparent add achieved!
_frame_roi = frame[y_offset:y_offset+_scr_height, x_offset:x_offset+_scr_width, 0:3] # Source original region
_all_square_bg_fg = cv2.bitwise_or(_frame_roi >> 2, png, mask=alpha_mask) # Both added, background somewhat reduced ...
_mask_inv = cv2.bitwise_not(alpha_mask) # Negate mask (inverse)
_surrounding_bg = cv2.bitwise_or(_frame_roi, png, mask=_mask_inv) # This yields the original background at transparent regions
frame[y_offset:y_offset+_scr_height, x_offset:x_offset+_scr_width, 0:3] = _all_square_bg_fg + _surrounding_bg # Now add both: Transparent add achieved!
# Ref.: https://stackoverflow.com/questions/14063070/overlay-a-smaller-image-on-a-larger-image-python-opencv
def overlay_image_alpha(img, img_overlay, pos, alpha_mask):


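For orientation, the revised overlay_png takes the target BGR frame, an x/y offset, the colour planes of the PNG and its alpha channel as an 8-bit mask; funcam.py calls it exactly this way after loading the logo with the alpha channel kept. A minimal usage sketch (file name and geometry are taken from the virtcam.sh example and assume emblem.png exists with an alpha channel):

import cv2
import numpy as np
import addimage

frame = np.zeros((480, 640, 3), dtype=np.uint8)   # stand-in for a captured BGR camera frame
logo = cv2.imread("emblem.png", -1)               # -1 keeps the alpha channel (BGRA)
logo = cv2.resize(logo, (100, 60))                # w/h, as used for the station emblem
addimage.overlay_png(frame, 12, 12,               # x/y offset of the overlay
                     logo[:, :, 0:3],             # colour planes
                     logo[:, :, 3])               # alpha channel used as transparency mask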
background.py  +113 -26

@@ -8,60 +8,147 @@ Replace/insert different backgrounds
History:
--------
25.05.20/KQ Initial (modularized) version
27.05.20/KQ Variable naming corrected
30.05.20/KQ Tight blur & non-blocking keyboard
22.06.20/KQ Actual greenscreen function added
"""
import numpy as np
import cv2
import sys
import select
import os
import kbhit
def do_alternate_background(current_frame,background_frame,alternate_frame,bBlur):
current_R_min = 0
current_G_min = 121
current_B_min = 0
current_R_max = 141
current_G_max = 255
current_B_max = 151
current_sign = 0
kb = kbhit.KBHit() # Activate keyboard access
def do_alternate_background(current_frame, background_frame, alternate_frame, bBlur):
"""Processing pipeline to insert an alternate background (in-place!)
modelled with help from https://medium.com/fnplus/blue-or-green-screen-effect-with-open-cv-chroma-keying-94d4a6ab2743
"""
delta_frame = cv2.absdiff(current_frame,background_frame) # Delta between background & current frame
_delta_frame = cv2.absdiff(current_frame, background_frame) # Delta between background & current frame
lower_black = np.array([0, 0, 0])
upper_black = np.array([80, 80, 80])
mask = cv2.inRange(delta_frame, lower_black, upper_black) # Yields black&white
_lower_black = np.array([0, 0, 0])
_upper_black = np.array([80, 80, 80])
_mask = cv2.inRange(_delta_frame, _lower_black, _upper_black) # Yields black&white
### Attn.: We're operating on an inverse mask!
### Attn.: We're operating on an inverse _mask!
### dilate actually increases (white) background
### erode actually increases (black) foreground
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(5,5)) # Clear artefacts
mask = cv2.dilate(mask,kernel2,iterations=1)
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5,5)) # Clear artefacts
_mask = cv2.dilate(_mask, _kernel, iterations=1)
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(9,9)) # Clear artefacts
mask2 = cv2.erode(mask,kernel,iterations=7)
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (9,9)) # Clear artefacts
_mask = cv2.erode(_mask, _kernel, iterations=7)
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3)) # Clear artefacts
mask3 = cv2.dilate(mask2,kernel2,iterations=1)
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3,3)) # Clear artefacts
_mask = cv2.dilate(_mask, _kernel, iterations=1)
# Ref. https://stackoverflow.com/questions/10316057/filling-holes-inside-a-binary-object
if False: # Fairly subjective improvement, bounding box keeps mask more steady ...
des = cv2.bitwise_not(mask3)
contour,hier = cv2.findContours(des,cv2.RETR_CCOMP,cv2.CHAIN_APPROX_SIMPLE)
des = cv2.bitwise_not(_mask)
contour,hier = cv2.findContours(des, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)
for cnt in contour:
#cv2.drawContours(des,[cnt],0,255,-1)
#cv2.drawContours(des, [cnt], 0, 255, -1)
hull = cv2.convexHull(cnt)
cv2.drawContours(des, [hull], 0, 255,-1)
mask4 = cv2.bitwise_not(des)
kernel2 = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(47,47)) # Shrink again
mask3 = cv2.dilate(mask4,kernel2,iterations=1) # along bounding box boundaries
cv2.drawContours(des, [hull], 0, 255, -1)
_mask = cv2.bitwise_not(des)
_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (47,47)) # Shrink again
_mask = cv2.dilate(_mask, _kernel, iterations=1) # along bounding box boundaries
_masked_image = np.copy(current_frame) # Pick current frame
_cropped = _masked_image[70:445, 70:570] # Select 'ROI' only: y1:y2, x1:x2
_masked_image = cv2.resize(_cropped, (640,480)) # w/h -> upscale!
_masked_image[_mask != 0] = [0, 0, 0] # From foreground take modified part (us!)
# Now add alternate background
if bBlur:
alternate_frame2 = cv2.blur(current_frame.copy(), (127,127)) #(63,63))
else:
alternate_frame2 = alternate_frame.copy()
alternate_frame2[_mask == 0] = [0,0,0]
# Finally mix both together
return _masked_image + alternate_frame2
def init_rgb_filter(background_frame):
"""Simple binning to adapt filter to dominant background colour
"""
global current_R_min,current_G_min,current_B_min
global current_R_max,current_G_max,current_B_max,current_sign
blurred_bg_frame = cv2.blur(background_frame, (127,127)) #(63,63))
bg_min_frame = cv2.resize(blurred_bg_frame,(4,3)) # Fast complexity reduction! BGR colour scheme
#rgb_mean = np.mean(bg_min_frame, axis=0) #Collapse to 1 x 4 x [R,G,B] mean
rgb_mean = np.mean(bg_min_frame, axis=(0,1)) # Collapse straight to [R,G,B] mean
DELTA = 50 # TODO: Test!
current_R_min = int(rgb_mean[0]-DELTA) # Now use means + tolerance for filter!
current_R_max = int(rgb_mean[0]+DELTA)
current_G_min = int(rgb_mean[1]-DELTA)
current_G_max = int(rgb_mean[1]+DELTA)
current_B_min = int(rgb_mean[2]-DELTA)
current_B_max = int(rgb_mean[2]+DELTA)
_rgb_msg = "R("+str(current_R_min)+".."+str(current_R_max)+")" + \
"G("+str(current_G_min)+".."+str(current_G_max)+")" + \
"B("+str(current_B_min)+".."+str(current_B_max)+")"
print(_rgb_msg,file=sys.stderr)
def do_alternate_greenscreen(current_frame, background_frame, alternate_frame, bBlur):
"""Processing pipeline to insert an alternate background (in-place!) w/ help of a 'greenscreen'
modelled with help from https://medium.com/fnplus/blue-or-green-screen-effect-with-open-cv-chroma-keying-94d4a6ab2743
"""
global current_R_min,current_G_min,current_B_min
global current_R_max,current_G_max,current_B_max,current_sign
masked_image2 = np.copy(current_frame) # Pick current frame
cropped = masked_image2[70:445, 70:570] # Select 'ROI' only: y1:y2, x1:x2
masked_image = cv2.resize(cropped, (640,480)) # w/h -> upscale!
_lower_green = np.array([current_R_min, current_G_min, current_B_min]) # RGB/BGR
_upper_green = np.array([current_R_max, current_G_max, current_B_max])
_mask = cv2.inRange(current_frame, _lower_green, _upper_green) # Yields black & white
masked_image[mask3 != 0] = [0, 0, 0] # From foreground take modified part (us!)
_masked_image = np.copy(current_frame) # Pick current frame
_masked_image[_mask != 0] = [0, 0, 0] # From foreground take modified part (us!)
# Now add alternate background
if bBlur:
alternate_frame2 = cv2.blur(current_frame.copy(),(127,127)) #(63,63))
alternate_frame2 = cv2.blur(current_frame.copy(), (127,127)) #(63,63))
else:
alternate_frame2 = alternate_frame.copy()
alternate_frame2[mask3 == 0] = [0,0,0]
alternate_frame2[_mask == 0] = [0,0,0]
if kb.kbhit():
c = kb.getch()
if c == '-':
current_sign = -1
if c == '+':
current_sign = 1
if c == 'r':
current_R_min = current_R_min + current_sign
if c == 'R':
current_R_max = current_R_max + current_sign
if c == 'g':
current_G_min = current_G_min + current_sign
if c == 'G':
current_G_max = current_G_max + current_sign
if c == 'b':
current_B_min = current_B_min + current_sign
if c == 'B':
current_B_max = current_B_max + current_sign
_rgb_msg = "R("+str(current_R_min)+".."+str(current_R_max)+")" + \
"G("+str(current_G_min)+".."+str(current_G_max)+")" + \
"B("+str(current_B_min)+".."+str(current_B_max)+")"
print(_rgb_msg,file=sys.stderr)
# Finally mix both together
return masked_image + alternate_frame2
return _masked_image + alternate_frame2

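To summarize the new path: init_rgb_filter blurs and downscales the captured background, takes the per-channel mean and stores mean ± DELTA (50) as the current_*_min/max bounds; do_alternate_greenscreen then builds a binary mask with cv2.inRange over those bounds, blanks the matching (screen-coloured) pixels in the live frame and fills them from the alternate or blurred background, while kbhit lets you nudge the bounds at runtime ('+'/'-' set the direction, then r/R, g/G, b/B move the respective channel's min/max). A condensed sketch of the core keying step, with the global state and keyboard handling stripped out (the name chroma_key and its arguments are ad hoc, a simplification rather than the exact module code):

import cv2
import numpy as np

def chroma_key(frame, alternate, lower, upper):
    # lower/upper are per-channel BGR bounds, e.g. np.array([...]) built from the background mean +/- 50
    mask = cv2.inRange(frame, lower, upper)   # 255 where the pixel matches the screen colour
    fg = frame.copy()
    fg[mask != 0] = [0, 0, 0]                 # drop the screen-coloured pixels from the foreground
    bg = alternate.copy()
    bg[mask == 0] = [0, 0, 0]                 # keep the replacement only where the screen was
    return fg + bg                            # regions are disjoint, so adding mixes both parts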
funcam.py  +171 -160

@@ -24,6 +24,8 @@ History:
23.05.20/KQ Clock & date parameters (permitting minimal version)
25.05.20/KQ Modularized, re-factored, transparency effect
26.05.20/KQ Bug fixes, logo turning effect now optionally selectable
27.05.20/KQ Variable naming corrected
22.06.20/KQ Actual greenscreen function option (w/ auto-adaptation!) added (requires a 'defined' i.e. green (or blue) background!)
"""
import argparse
@@ -49,131 +51,132 @@ parser.add_argument('--turn', action='store_true', help='Turning effect for logo
parser.add_argument('--alarm', nargs='?', help='Alarm time, format as HH:MM')
parser.add_argument('--altb', nargs='?', help='Alternate background image path')
parser.add_argument('--blur', action='store_true', help='Blur background (add-on for --altb option)')
parser.add_argument('--greenscreen', action='store_true', help='Greenscreen available (add-on for --altb option)')
parser.add_argument('--minimize', nargs=4, help='Display original stream resized @x,y w/ w,h (add-on for --altb option)')
parser.add_argument('--d3d', action='store_true', help='Draw w/ 3D elements')
parser.add_argument('--ttf', nargs='?', help='Use TrueType font <font> (Attn.: Scaling not used w/ ttf)')
parser.add_argument('--movie', nargs=5, help='Display a background movie @x,y w/ w,h')
args = parser.parse_args()
print("\n<<< funcam started ... >>>",file=sys.stderr)
print("\n<<< funcam started ... >>>", file=sys.stderr)
# Hint: print() to stderr (fh#2) as the actual video stream goes to stdout (fh#1)!
print("OpenCV version : "+str(cv2.__version__),file=sys.stderr) # For control purposes
# Prepare clock
if args.clock != None:
x_clock = int(args.clock[0]) #505
y_clock = int(args.clock[1]) #472
c_clock = int(args.clock[2],16) #(16,16,16)
r_clock = c_clock >> 16
g_clock = (c_clock & 0xff00) >> 8
b_clock = c_clock & 0xff
clock_x = int(args.clock[0]) #505
clock_y = int(args.clock[1]) #472
clock_c = int(args.clock[2], 16) #(16,16,16)
clock_r = clock_c >> 16
clock_g = (clock_c & 0xff00) >> 8
clock_b = clock_c & 0xff
# Prepare date
if args.date != None:
x_date = int(args.date[0]) #494
y_date = int(args.date[1]) #428
c_date = int(args.date[2],16) #(255,255,255)
r_date = c_date >> 16
g_date = (c_date & 0xff00) >> 8
b_date = c_date & 0xff
s_date = float(args.date[3]) #0.8
date_x = int(args.date[0]) #494
date_y = int(args.date[1]) #428
date_c = int(args.date[2], 16) #(255,255,255)
date_r = date_c >> 16
date_g = (date_c & 0xff00) >> 8
date_b = date_c & 0xff
date_s = float(args.date[3]) #0.8
# Some argument scanning for start
print("Using webcam on : /dev/video"+str(args.webcam),file=sys.stderr)
print("Using webcam on : /dev/video" + str(args.webcam), file=sys.stderr)
if args.clock:
print("Clock display : x/y="+str(args.clock[0])+"/"+str(args.clock[1])+" color="+str(r_clock)+","+str(g_clock)+","+str(b_clock),file=sys.stderr)
print("Clock display : x/y=" + str(args.clock[0]) + "/" + str(args.clock[1]) + " color=" + str(clock_r) + "," + str(clock_g) + "," + str(clock_b), file=sys.stderr)
if args.date:
print("Date display : x/y="+str(args.date[0])+"/"+str(args.date[1])+" color="+str(r_date)+","+str(g_date)+","+str(b_date)+" scale="+str(s_date),file=sys.stderr)
print("Date display : x/y=" + str(args.date[0]) + "/" + str(args.date[1]) + " color=" + str(date_r) + "," + str(date_g) + "," + str(date_b) + " scale=" + str(date_s), file=sys.stderr)
if args.id != None:
print("ID will be : \'"+args.id+"\'",file=sys.stderr)
print("ID will be : \'" + args.id + "\'", file=sys.stderr)
if args.logo != None:
print("Logo image file used : \'"+args.logo[6]+"\' x/y=" + str(args.logo[0])+"/"+str(args.logo[1])+" w/h="+str(args.logo[2])+"/"+str(args.logo[3])+ " \'"+args.logo[4]+"\',\'"+args.logo[5]+"\'",file=sys.stderr)
print("Logo image file used : \'"+ args.logo[6] +"\' x/y=" + str(args.logo[0]) + "/" + str(args.logo[1]) + " w/h=" + str(args.logo[2]) + "/" + str(args.logo[3]) + " \'" + args.logo[4] + "\',\'" + args.logo[5] + "\'", file=sys.stderr)
if args.turn:
print("Logo turning effect : Enabled",file=sys.stderr)
print("Logo turning effect : Enabled", file=sys.stderr)
if args.rss != None:
print("RSS feed : \'"+args.rss+"\'",file=sys.stderr)
print("RSS feed : \'" + args.rss + "\'", file=sys.stderr)
if args.alarm != None:
alarm_hh = int(args.alarm[0:2])
alarm_mm = int(args.alarm[3:5])
print("Alarm time : "+str(alarm_hh)+":"+str(alarm_mm),file=sys.stderr) # Alarm time
print("Alarm time : " + str(alarm_hh) + ":" + str(alarm_mm), file=sys.stderr) # Alarm time
else: # Default off
alarm_hh = -1
alarm_mm = -1
alternate_background = args.altb
if args.altb != None:
print("Alternate background (beta!): \'"+args.altb+"\'",file=sys.stderr)
print("Alternate background (beta!): \'" + args.altb + "\'", file=sys.stderr)
if args.blur:
print("Background blur : Enabled",file=sys.stderr)
print("Background blur : Enabled", file=sys.stderr)
if args.greenscreen:
print("Background greenscreen : Enabled", file=sys.stderr)
if args.minimize != None:
print("Original stream minimized : x/y="+str(args.minimize[0])+"/"+str(args.minimize[1])+" w/h="+str(args.minimize[2])+"/"+str(args.minimize[3]),file=sys.stderr)
print("Original stream minimized : x/y=" + str(args.minimize[0]) + "/" + str(args.minimize[1]) + " w/h=" + str(args.minimize[2]) + "/" + str(args.minimize[3]), file=sys.stderr)
if args.d3d:
print("3D element draw : Enabled",file=sys.stderr)
print("3D element draw : Enabled", file=sys.stderr)
if args.ttf != None:
print("TrueType font : "+args.ttf+ " (from /usr/share/fonts)",file=sys.stderr)
print("TrueType font : " + args.ttf + " (from /usr/share/fonts)", file=sys.stderr)
if args.movie != None:
print("Background movie : \'"+args.movie[4]+"\' x/y=" + str(args.movie[0])+"/"+str(args.movie[1])+" w/h="+str(args.movie[2])+"/"+str(args.movie[3]),file=sys.stderr)
print("Background movie : \'" + args.movie[4] + "\' x/y=" + str(args.movie[0]) + "/" + str(args.movie[1]) + " w/h=" + str(args.movie[2]) + "/" + str(args.movie[3]), file=sys.stderr)
# Prepare scroll line message as an RSS feed. Currently, this is only read once upon start ...
if args.rss != None:
d = feedparser.parse(args.rss)
print("Feed entries: " + str(len(d.entries)),file=sys.stderr)
rss_n = len(d.entries)
if len(d.entries) < 1:
fparser = feedparser.parse(args.rss)
print("Feed entries: " + str(len(fparser.entries)), file=sys.stderr)
rss_n = len(fparser.entries)
if len(fparser.entries) < 1:
msg = "+++ No/invalid feed data? "
else:
if rss_n > 10: # Limit to the latter stuff only ...
rss_n = 10
#try:
msg = "+++ "
for i in range(0,rss_n,1):
for i in range(0, rss_n, 1):
try:
msg = msg + datetime.datetime.strptime(d.entries[i].published,'%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')\
+ " " + d.entries[i].title + " +++ "
msg = msg + datetime.datetime.strptime(fparser.entries[i].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')\
+ " " + fparser.entries[i].title + " +++ "
except:
msg = msg + datetime.datetime.strptime(d.entries[i].published,'%a, %d %b %Y %H:%M:%S %z').strftime('%d.%m.%y')\
+ " " + d.entries[i].title + " +++ "
#msg = msg + " " + d.entries[i].title + " +++ "
msg = msg + datetime.datetime.strptime(fparser.entries[i].published, '%a, %d %b %Y %H:%M:%S %z').strftime('%d.%m.%y')\
+ " " + fparser.entries[i].title + " +++ "
#msg = msg + " " + fparser.entries[i].title + " +++ "
msg = msg.translate(str.maketrans({"ä":"ae", "ö":"oe", "ü":"ue", "ß":"ss", "Ä":"Ae", "Ö":"Oe", "Ü":"Ue"})) # OpenCV fonts lack umlauts
lenmsg = len(msg)
scroll_x = 500 # Scroll text background offset
# Prepare station emblem data
if args.logo != None:
src1 = cv2.imread(args.logo[6],-1)
if src1 is None:
print("*** " + args.logo[6] + " not found!",file=sys.stderr)
logo1 = cv2.imread(args.logo[6], -1)
if logo1 is None:
print("*** " + args.logo[6] + " not found!", file=sys.stderr)
exit(-1)
x_offset = int(args.logo[0]) #12
y_offset = int(args.logo[1]) #12
max_scr_width = int(args.logo[2]) #100/40 # Initial (full) width
scr_width = max_scr_width
scr_height = int(args.logo[3]) #60 # Height
scr_sign = -1 # Start shrinking
logo_x = int(args.logo[0]) #12
logo_y = int(args.logo[1]) #12
max_logo_width = int(args.logo[2]) #100/40 # Initial (full) width
logo_w = max_logo_width
logo_h = int(args.logo[3]) #60 # Height
logo_sign = -1 # Start shrinking
if args.turn != True:
src2 = cv2.resize(src1, (scr_width,scr_height)) # w/h
alpha = 0.5 # 0.5 (0..1)
beta = (1.0 - alpha)
logo2 = cv2.resize(logo1, (logo_w, logo_h)) # w/h
# Prepare 3D background elements
if args.d3d:
# Scroll line background
imgWhiteline3d = cv2.imread("whiteline3d.png",-1)
if imgWhiteline3d is None:
print("*** whiteline3d.png not found!",file=sys.stderr)
img_whiteline3d = cv2.imread("whiteline3d.png", -1)
if img_whiteline3d is None:
print("*** whiteline3d.png not found!", file=sys.stderr)
exit(-1)
imgWhiteline3d = cv2.resize(imgWhiteline3d, (500,30)) # w/h
img_whiteline3d = cv2.resize(img_whiteline3d, (500,30)) # w/h
# Clock background
imgWhite3d = cv2.imread("white3d.png",-1)
if imgWhite3d is None:
print("*** white3d.png not found!",file=sys.stderr)
img_white3d = cv2.imread("white3d.png", -1)
if img_white3d is None:
print("*** white3d.png not found!", file=sys.stderr)
exit(-1)
imgWhite3d = cv2.resize(imgWhite3d, (640-500,480-432)) # w/h
imgRed3d = cv2.imread("red3d.png",-1) # Color space adjust included (required!)
if imgRed3d is None:
print("*** red3d.png not found!",file=sys.stderr)
img_white3d = cv2.resize(img_white3d, (640-500,480-432)) # w/h
img_red3d = cv2.imread("red3d.png", -1) # Color space adjust included (required!)
if img_red3d is None:
print("*** red3d.png not found!", file=sys.stderr)
exit(-1)
imgRed3d = cv2.resize(imgRed3d, (640-500,480-432)) # w/h
img_red3d = cv2.resize(img_red3d, (640-500,480-432)) # w/h
if args.ttf != None: # TrueType fonts enabled?
# Pillow used for TrueType font integration
@@ -187,7 +190,7 @@ if args.ttf != None: # TrueType fonts enabled?
# Use a TrueType font from /usr/share/fonts (search for *.ttf!)
# Bold small fonts: Liberation Sans Narrow Bold, Nimbus Sans L Bold Condensed, Ubuntu Condensed Bold
regularfont = ImageFont.truetype(args.ttf, 44) #"DejaVuSansCondensed-Bold.ttf", 44) # Height for clock
boldfont = ImageFont.truetype(args.ttf,24) #"DejaVuSansCondensed-Bold.ttf", 24) # Height for scroll line
boldfont = ImageFont.truetype(args.ttf, 24) #"DejaVuSansCondensed-Bold.ttf", 24) # Height for scroll line
# Prepare background movie
if args.movie != None:
@@ -203,9 +206,9 @@ else:
# Prepare background 'greenscreen'
if alternate_background != None:
alternate_frame1 = cv2.imread(args.altb,-1)
alternate_frame1 = cv2.imread(args.altb, -1)
if alternate_frame1 is None:
print("*** "+str(args.altb)+" not found!",file=sys.stderr)
print("*** "+str(args.altb) + " not found!", file=sys.stderr)
exit(-1)
alternate_frame = cv2.resize(alternate_frame1, (640,480)) # w/h
#alternate_frame = cv2.cvtColor(alternate_frame2,cv2.COLOR_BGRA2RGB) # Correct color space
@@ -218,9 +221,9 @@ if args.minimize != None:
mini_h = int(args.minimize[3])
# User ID label data
current_state = False # Label starts turned off
current_x = 0 # Initial coordinate offsets for labelling
current_y = 0
label_state = False # Label starts turned off
label_x = 0 # Initial coordinate offsets for labelling
label_y = 0
# Actual video capture loop
try:
@@ -235,26 +238,29 @@ except:
exit(-2)
if alternate_background != None: # Shall we provide an alternative background?
print("1. Please pick your chair & get out of the picture together!\n2. Wait 5s for continuation message ...",file=sys.stderr)
print("1. Please pick your chair & get out of the picture together!\n2. Wait 5s for continuation message ...", file=sys.stderr)
time.sleep(3) # Time for operator to disappear!
for i in range(1,75): # Warmup skip ... (~2s)
for i in range(1, 75): # Warmup skip ... (~2s)
ret, background_frame = cap.read()
if ret!=True:
print("*** Can't create background frame!",file=sys.stderr)
if ret != True:
print("*** Can't create background frame!", file=sys.stderr)
exit(-3)
print("\0073. Thank you! You may enter webcam range again ...\n",file=sys.stderr)
if args.greenscreen:
background.init_rgb_filter(background_frame)
print("\0073. Thank you! You may enter webcam range again ...\n", file=sys.stderr)
print("- Start your webcam application & connect to the virtual camera\n"\
"- Press [Ctrl][C] to abort ...",file=sys.stderr)
"- Press [Ctrl][C] to abort ...", file=sys.stderr)
#--------------------------------------------------------------------------------------------------------------
while(cap.isOpened()):
try:
ret, frame = cap.read()
if ret==True:
if ret == True:
s = int(time.strftime("%S", time.localtime())) # Seconds (needed later as well ...)
hh = int(time.strftime("%H", time.localtime())) # Alarm time needed for processing
mm = int(time.strftime("%M", time.localtime()))
ss = int(time.strftime("%S", time.localtime())) # Seconds (needed later as well ...)
# Now: The fun stuff! ------------------------------------------
@@ -263,7 +269,7 @@ while(cap.isOpened()):
try:
ret, fmovie = movie.read() # Next frame ...
#ret, fmovie = movie.read() # Next frame ... (speed-up nec.!)
fmovie = cv2.resize(fmovie, (movie_w,movie_h)) # w/h
fmovie = cv2.resize(fmovie, (movie_w, movie_h)) # w/h
alternate_frame2 = alternate_frame.copy()
alternate_frame2[movie_y:movie_y+movie_h, movie_x:movie_x+movie_w] = fmovie[0:movie_h, 0:movie_w] # Select 'ROI' only: y1:y2, x1:x2
except Exception as em: # EOF?
@@ -278,44 +284,49 @@ while(cap.isOpened()):
if args.minimize != None: # Use simple resizing only
if args.blur: # Blurring requires special processing
frame = background.do_alternate_background(frame,background_frame,alternate_frame2,True)
if args.greenscreen: # We have a defined background 'greenscreen' in place
frame = background.do_alternate_greenscreen(frame, background_frame, alternate_frame2, True)
else:
frame = background.do_alternate_background(frame, background_frame, alternate_frame2, True)
fmini = cv2.resize(frame, (mini_w,mini_h)) # w/h
alternate_frame2[mini_y:mini_y+mini_h, mini_x:mini_x+mini_w] = fmini[0:mini_h, 0:mini_w] # Select 'ROI' only: y1:y2, x1:x2
frame = alternate_frame2
else: # Actual mask processing otherwise ...
frame = background.do_alternate_background(frame,background_frame,alternate_frame2,args.blur)
if args.greenscreen:
frame = background.do_alternate_greenscreen(frame, background_frame, alternate_frame2, args.blur)
else:
frame = background.do_alternate_background(frame, background_frame, alternate_frame2, args.blur)
# Emblem stancil (requires BGRA color space!)
if args.logo != None:
if args.turn:
scr_width = scr_width + scr_sign
if scr_width <= 4:
scr_sign = 1 # Start increasing
if scr_width >= max_scr_width:
scr_sign = -1 # & shrinking again
src2 = cv2.resize(src1, (scr_width,scr_height)) # w/h
logo_w = logo_w + logo_sign
if logo_w <= 4:
logo_sign = 1 # Start increasing
if logo_w >= max_logo_width:
logo_sign = -1 # & shrinking again
logo2 = cv2.resize(logo1, (logo_w, logo_h)) # w/h
# New version w/ transparency effect!
addimage.overlay_png(frame, x_offset+((max_scr_width-scr_width)//2), y_offset, src2[:, :, 0:3], src2[:, :, 3])
addimage.overlay_png(frame, logo_x + ((max_logo_width - logo_w)//2), logo_y, logo2[:, :, 0:3], logo2[:, :, 3])
if args.d3d:
# Scroll line background
if args.rss != None:
addimage.overlay_png(frame, 0, 450, imgWhiteline3d[:, :, 0:3], imgWhiteline3d[:, :, 3])
frame4 = cv2.cvtColor(frame,cv2.COLOR_BGRA2RGB) # Correct color space
addimage.overlay_png(frame, 0, 450, img_whiteline3d[:, :, 0:3], img_whiteline3d[:, :, 3])
frame4 = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGB) # Correct color space
# Emblem subtext
if args.logo != None:
cv2.putText(frame4, args.logo[4], #" LIVE",
(x_offset-8,y_offset+scr_height+4), # x,y
(logo_x - 8, logo_y + logo_h + 4), # x,y
(cv2.FONT_HERSHEY_DUPLEX), # Font
0.4, # Scaling
(255, 255, 255), # RGB
1, # Thickness
cv2.LINE_AA)
cv2.putText(frame4, args.logo[5], #"Constance/Germany",
(x_offset-8,y_offset+scr_height+12), # x,y
(logo_x - 8, logo_y + logo_h + 12), # x,y
(cv2.FONT_HERSHEY_DUPLEX), # Font
0.4, # Scaling
(255, 255, 255), # RGB
@@ -324,57 +335,57 @@ while(cap.isOpened()):
# Random walk text label
if args.id != None:
if s < 10:
current_state = True
if ss < 10:
label_state = True
else:
current_state = False
current_x = 0
current_y = 0
label_state = False
label_x = 0
label_y = 0
if current_state:
new_x_vector = random.randrange(-1,2,1) # yields -2..2
new_y_vector = random.randrange(-1,2,1) # yields -2..2
current_x = current_x + new_x_vector
current_y = current_y + new_y_vector
if label_state:
new_x_vector = random.randrange(-1, 2, 1) # yields -2..2
new_y_vector = random.randrange(-1, 2, 1) # yields -2..2
label_x = label_x + new_x_vector
label_y = label_y + new_y_vector
# Base (text) rectangle
cv2.rectangle(frame4,
(16+current_x,356+12+current_y), # Top/left
(352+current_x,416+12+current_y), # Bottom right
(255,255,0), # Color
(16 + label_x, 356 + 12 + label_y), # Top/left
(352 + label_x, 416 + 12 + label_y), # Bottom right
(255, 255, 0), # Color
-1) # Thickness
# Link to center
if args.minimize != None: # Variable center?
center_x = mini_x+int(mini_w*1/3)
center_y = mini_y+int(mini_h*2/3)
center_x = mini_x + int(mini_w * 1/3)
center_y = mini_y + int(mini_h * 2/3)
else: # No, fixed center!
center_x = 280
center_y = 330
cv2.line(frame4,
(156+current_x,360+12+current_y), # x1,y1
(center_x,center_y), # x2,y2
(255,255,0), # Color
(156 + label_x, 360 + 12 + label_y), # x1,y1
(center_x, center_y), # x2,y2
(255, 255, 0), # Color
2) # Thickness
# Center cross hair
cv2.line(frame4,
(center_x-8,center_y), # x1,y1
(center_x+8,center_y), # x2,y2
(255,255,255), # Color
(center_x - 8, center_y), # x1,y1
(center_x + 8, center_y), # x2,y2
(255, 255, 255), # Color
2) # Thickness
cv2.line(frame4,
(center_x,center_y-8), # x1,y1
(center_x,center_y+8), # x2,y2
(255,255,255), # Color
(center_x, center_y - 8), # x1,y1
(center_x, center_y + 8), # x2,y2
(255, 255, 255), # Color
2) # Thickness
# Text frame
cv2.rectangle(frame4,
(16+4+current_x,356+12+4+current_y), # Top/left
(352-4+current_x,416+12-4+current_y), # Bottom right
(0,0,0), # Color
(16 + 4 + label_x, 356 + 12 + 4 + label_y), # Top/left
(352 - 4 + label_x, 416 + 12 - 4 + label_y), # Bottom right
(0, 0, 0), # Color
-1) # Thickness
# Actual text output
cv2.putText(frame4, args.id,
(32+current_x, 400+12+current_y), # x,y
(32 + label_x, 400 + 12 + label_y), # x,y
(cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT), # Font
1.4, # Scaling
(255, 255, 255), # RGB
@@ -385,94 +396,94 @@ while(cap.isOpened()):
if args.rss != None:
if args.d3d == False:
cv2.rectangle(frame4,
(0,450), # Top/left
(640,480), # Bottom right
(255,255,255), # Color
(0, 450), # Top/left
(640, 480), # Bottom right
(255, 255, 255), # Color
-1) # Thickness (-1=opaque/no border
if args.ttf != None:
imgPil = Image.fromarray(frame4) # Unfortunately nec. ... (OpenCV->PIL)
draw = ImageDraw.Draw(imgPil) # Get a drawing handle (?!)
draw.text((scroll_x, 452), msg, font=boldfont, fill=(16,16,16,0)) # Draw actual message
draw.text((scroll_x, 452), msg, font=boldfont, fill=(16, 16, 16, 0)) # Draw actual message
np_image = np.array(imgPil) # use numpy to convert the pil_image into a numpy array
frame4 = np_image # W/o conversion this shall be a valid cam frame already ...
else:
cv2.putText(frame4, msg,
(scroll_x,472), # x,y
(scroll_x, 472), # x,y
(cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT), # Font
0.8, #1.0, # Scaling
(0,0,0), #(64,64,64), #(0, 0, 128), # RGB
(0, 0, 0), #(64,64,64), #(0, 0, 128), # RGB
1, #2, # Thickness
cv2.LINE_AA)
if args.d3d == False:
cv2.line(frame4,
(498,440), # x1,y1
(498,480), # x2,y2
(200,200,200), # Color
(498, 440), # x1,y1
(498, 480), # x2,y2
(200, 200, 200), # Color
2) # Thickness
cv2.line(frame4,
(0,440), # x1,y1
(640,440), # x2,y2
(200,200,200), # Color
(0, 440), # x1,y1
(640, 440), # x2,y2
(200, 200, 200), # Color
1) # Thickness
scroll_x = scroll_x - 6
if scroll_x < -lenmsg*4.5*3: # was: -500 ...
if scroll_x < -lenmsg * 4.5 * 3: # was: -500 ...
scroll_x = 500
# Time output
if args.clock != None:
t = time.strftime("%H:%M", time.localtime()) # Was: %H:%M:%S ...
if args.d3d == False:
if hh==alarm_hh and mm==alarm_mm and (s % 2) == 0: # 1s blink during alarm 'minute'
if hh==alarm_hh and mm==alarm_mm and (ss % 2) == 0: # 1s blink during alarm 'minute'
cv2.rectangle(frame4,
(x_clock-5,y_clock-40), # Top/left 505,472
(x_clock+(640-505),y_clock+8), # Bottom right
(255,0,0), # Color
(clock_x -5, clock_y - 40), # Top/left 505,472
(clock_x + (640-505), clock_y + 8), # Bottom right
(255, 0, 0), # Color
-1) # Thickness (-1=opaque/no border
else:
cv2.rectangle(frame4,
(x_clock-5,y_clock-40), # Top/left
(x_clock+(640-505),y_clock+8), # Bottom right
(160,160,160), # Color
(clock_x - 5, clock_y - 40), # Top/left
(clock_x + (640-505), clock_y + 8), # Bottom right
(160, 160, 160), # Color
-1) # Thickness (-1=opaque/no border
else: # Color space adjustment required within image ...
# Clock background
if hh==alarm_hh and mm==alarm_mm and (s % 2) == 0: # 1s blink during alarm 'minute'
addimage.overlay_png(frame4, x_clock-5, y_clock-40, imgRed3d[:, :, 0:3], imgRed3d[:, :, 3])
if hh==alarm_hh and mm==alarm_mm and (ss % 2) == 0: # 1s blink during alarm 'minute'
addimage.overlay_png(frame4, clock_x - 5, clock_y - 40, img_red3d[:, :, 0:3], img_red3d[:, :, 3])
cv2.rectangle(frame4,
(x_clock-5,y_clock-40), # Top/left
(x_clock+(638-505),y_clock+8), # Bottom right
(255,0,0), # Color
(clock_x - 5, clock_y - 40), # Top/left
(clock_x + (638-505), clock_y + 8), # Bottom right
(255, 0, 0), # Color
2) # Thickness (-1=opaque/no border)
else:
addimage.overlay_png(frame4, x_clock-5, y_clock-40, imgWhite3d[:, :, 0:3], imgWhite3d[:, :, 3])
addimage.overlay_png(frame4, clock_x - 5, clock_y - 40, img_white3d[:, :, 0:3], img_white3d[:, :, 3])
cv2.rectangle(frame4,
(x_clock-5,y_clock-40), # Top/left
(x_clock+(638-505),y_clock+8), # Bottom right
(80,80,80), # Color
(clock_x - 5, clock_y - 40), # Top/left
(clock_x + (638-505), clock_y + 8), # Bottom right
(80, 80, 80), # Color
2) # Thickness (-1=opaque/no border)
if args.ttf != None:
imgPil = Image.fromarray(frame4) # Unfortunately nec. ... (OpenCV->PIL)
draw = ImageDraw.Draw(imgPil) # Get a drawing handle (?!)
draw.text((x_clock+1, y_clock-40), t, font=regularfont, fill=(r_clock,g_clock,b_clock,0)) # Draw actual message
draw.text((clock_x + 1, clock_y - 40), t, font=regularfont, fill=(clock_r, clock_g, clock_b, 0)) # Draw actual message
np_image = np.array(imgPil) # use numpy to convert the pil_image into a numpy array
frame4 = np_image # W/o conversion this shall be a valid cam frame already ...
else:
cv2.putText(frame4, t,
(x_clock,y_clock), # x,y (505,472)
(clock_x, clock_y), # x,y (505,472)
(cv2.FONT_HERSHEY_DUPLEX), # or cv2.QT_FONT_LIGHT), # Font
1.4, # Scaling
(r_clock, g_clock, b_clock), # RGB
(clock_r, clock_g, clock_b), # RGB
4, # Thickness
cv2.LINE_AA)
if args.date != None:
d = time.strftime("%d:%m:%Y", time.localtime())
cv2.putText(frame4, d,
(x_date,y_date), # x,y
dd = time.strftime("%d:%m:%Y", time.localtime())
cv2.putText(frame4, dd,
(date_x, date_y), # x,y
(cv2.FONT_HERSHEY_DUPLEX), # or cv2.QT_FONT_LIGHT), # Font
s_date, # Scaling
(r_date,g_date,b_date), # RGB (255,255,255)
date_s, # Scaling
(date_r, date_g, date_b), # RGB (255,255,255)
1, # Thickness
cv2.LINE_AA)
@@ -482,17 +493,17 @@ while(cap.isOpened()):
# Write raw output (to be redirected to video device)
sys.stdout.buffer.write(frame4.tobytes())
else:
print("*** Frame not read?!",file=sys.stderr)
print("*** Frame not read?!", file=sys.stderr)
break
except KeyboardInterrupt: # Quit gracefully ...
break
except Exception as e:
print("\n*** funcam aborted?!",file=sys.stderr)
print(str(e),file=sys.stderr)
print("\n*** funcam aborted?!", file=sys.stderr)
print(str(e), file=sys.stderr)
break
cap.release()
if movie != None:
movie.release()
print("\n<<< funcam terminated. >>>\n",file=sys.stderr)
print("\n<<< funcam terminated. >>>\n", file=sys.stderr)

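One small side note on the renamed clock/date variables above: the third --clock/--date argument is a packed hex RGB value that gets split into channels by shifting and masking. Worked through for the 0x101010 used in virtcam.sh (illustration only, not part of the commit):

clock_c = int("0x101010", 16)        # 1052688
clock_r = clock_c >> 16              # 16
clock_g = (clock_c & 0xff00) >> 8    # 16
clock_b = clock_c & 0xff             # 16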
getvideobyname.py  +10 -9

@@ -8,27 +8,28 @@ Pick 1st video port by webcam id (requires /sys/class/video4linux layout)
History:
--------
24.05.20/KQ Initial version
27.05.20/KQ Variable naming corrected
"""
import sys # For arg passing only ...
sBasepath = "/sys/class/video4linux/video" # + Index (added later)
sFilename = "/name"
basepath = "/sys/class/video4linux/video" # + Index (added later)
filename = "/name"
# Some argument scanning for start
if len(sys.argv) < 2:
print("usage: getvideobyname <webcam_name>",file=sys.stderr)
exit(0)
sWebcamID = sys.argv[1] # Passed argument
webcam_id = sys.argv[1] # Passed argument
for i in range(0,8,1): # First 8 video devices will be checked
for i in range(0, 8, 1): # First 8 video devices will be checked
try: # As it may fail ...
sFullpath = sBasepath+str(i)+sFilename # Build complete path
with open(sFullpath) as fr: # Try to open the actual file
sCurrentWebcam = (fr.read()).replace("\n","") # Make it a one liner ...
print(sCurrentWebcam+" -> /dev/video"+str(i),file=sys.stderr) # Feedback: What's avail. ...
if sCurrentWebcam.find(sWebcamID) >= 0: # Found!
fullpath = basepath + str(i) + filename # Build complete path
with open(fullpath) as fr: # Try to open the actual file
current_webcam = (fr.read()).replace("\n", "") # Make it a one liner ...
print(current_webcam + " -> /dev/video" + str(i), file=sys.stderr) # Feedback: What's avail. ...
if current_webcam.find(webcam_id) >= 0: # Found!
print(str(i)) # Return Index
break # Done!
except:


getvirtualvideo.py  +4 -3

@@ -8,6 +8,7 @@ Pick 1st virtual video port
History:
--------
09.05.20/KQ Initial version
27.05.20/KQ Variable naming corrected
"""
import sys
@@ -18,13 +19,13 @@ if len(sys.argv) > 1:
else:
basename = "videolist"
with open(basename+"1.txt") as l1:
with open(basename + "1.txt") as l1:
r1 = l1.read()
with open(basename+"2.txt") as l1:
with open(basename + "2.txt") as l1:
r2 = l1.read()
r3 = r2.replace(r1,"") # Skip previously existing entries ...
r3 = r2.replace(r1, "") # Skip previously existing entries ...
if len(r3) > 10:
r4 = r3[0:11] # /dev/video? (Att.: Single digit cam# only!)
print(r4) # Use output '/dev/video?'


kbhit.py  +130 -0

@@ -0,0 +1,130 @@
#!/usr/bin/env python
# http://simondlevy.academic.wlu.edu/files/software/kbhit.py
'''
A Python class implementing KBHIT, the standard keyboard-interrupt poller.
Works transparently on Windows and Posix (Linux, Mac OS X). Doesn't work
with IDLE.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as
published by the Free Software Foundation, either version 3 of the
License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
'''
import os
# Windows
if os.name == 'nt':
import msvcrt
# Posix (Linux, OS X)
else:
import sys
import termios
import atexit
from select import select
class KBHit:
def __init__(self):
'''Creates a KBHit object that you can call to do various keyboard things.
'''
if os.name == 'nt':
pass
else:
# Save the terminal settings
self.fd = sys.stdin.fileno()
self.new_term = termios.tcgetattr(self.fd)
self.old_term = termios.tcgetattr(self.fd)
# New terminal setting unbuffered
self.new_term[3] = (self.new_term[3] & ~termios.ICANON & ~termios.ECHO)
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.new_term)
# Support normal-terminal reset at exit
atexit.register(self.set_normal_term)
def set_normal_term(self):
''' Resets to normal terminal. On Windows this is a no-op.
'''
if os.name == 'nt':
pass
else:
termios.tcsetattr(self.fd, termios.TCSAFLUSH, self.old_term)
def getch(self):
''' Returns a keyboard character after kbhit() has been called.
Should not be called in the same program as getarrow().
'''
s = ''
if os.name == 'nt':
return msvcrt.getch().decode('utf-8')
else:
return sys.stdin.read(1)
def getarrow(self):
''' Returns an arrow-key code after kbhit() has been called. Codes are
0 : up
1 : right
2 : down
3 : left
Should not be called in the same program as getch().
'''
if os.name == 'nt':
msvcrt.getch() # skip 0xE0
c = msvcrt.getch()
vals = [72, 77, 80, 75]
else:
c = sys.stdin.read(3)[2]
vals = [65, 67, 66, 68]
return vals.index(ord(c.decode('utf-8')))
def kbhit(self):
''' Returns True if keyboard character was hit, False otherwise.
'''
if os.name == 'nt':
return msvcrt.kbhit()
else:
dr,dw,de = select([sys.stdin], [], [], 0)
return dr != []
# Test
if __name__ == "__main__":
kb = KBHit()
print('Hit any key, or ESC to exit')
while True:
if kb.kbhit():
c = kb.getch()
if ord(c) == 27: # ESC
break
print(c)
kb.set_normal_term()

virtcam.sh  +5 -2

@@ -19,6 +19,8 @@
# 6. Enforce user abort in script w/ [Ctrl-C]
# 7. Remove akvcam kernel module (& thus the virtual camera devices)
#
# Hint: After installation of a new kernel, you will have to 'make' akvcam again (kernel module!) ...
#
echo "Running virtcam ..."
NAMEPLATE="Expert (+++)"
@@ -51,8 +53,9 @@ echo "*** For best video conference experience (currently, 5/2020) use chromium-
# python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --rss "file://$PWD/FakeNews.xml" --alarm 21:50 --d3d --ttf DejaVuSansCondensed-Bold.ttf --altb alternate_background.jpg --movie 100 180 150 100 spaceships.mp4 > $CAMPORT
# python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --turn --rss "https://www.hacknology.de/index.xml" --d3d --ttf DejaVuSansCondensed-Bold.ttf > $CAMPORT
# python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --turn --rss "https://www.hacknology.de/index.xml" --alarm 21:49 --d3d --ttf DejaVuSansCondensed-Bold.ttf --altb alternate_background.jpg --minimize 320 100 200 200 > $CAMPORT
python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --turn --rss "https://www.hacknology.de/index.xml" --alarm 21:49 --d3d --ttf DejaVuSansCondensed-Bold.ttf --altb alternate_background.jpg --blur > $CAMPORT
# python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --turn --rss "https://www.hacknology.de/index.xml" --alarm 21:49 --d3d --ttf DejaVuSansCondensed-Bold.ttf --altb alternate_background.jpg --blur > $CAMPORT
# python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --turn --rss "https://www.hacknology.de/index.xml" --alarm 21:49 --d3d --ttf DejaVuSansCondensed-Bold.ttf --logo 12 12 120 69 ' LIVE' 'Constance/Germany' emblem.png --altb alternate_background_scifi.jpg --greenscreen --movie 100 180 150 100 spaceships.mp4 > $CAMPORT
python3 funcam.py --webcam $WEBCAM --clock 505 472 0x101010 --date 494 428 0xffffff 0.8 --id "$NAMEPLATE" --logo 12 12 100 60 ' LIVE' 'Constance/Germany' emblem.png --turn --rss "https://www.hacknology.de/index.xml" --alarm 21:49 --d3d --ttf DejaVuSansCondensed-Bold.ttf > $CAMPORT
echo "Removing virtual (camera) devices ..."
sudo rmmod akvcam.ko
echo "virtcam stopped."

