Pimp my webcam (stream) project
You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
 
 

509 lines
26 KiB

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
funcam.py
Camera stream modification
History:
--------
03.05.20/KQ Initial version
06.05.20/KQ Release version
09.05.20/KQ Message translation, exception handling, arg passing improved, more general emblem.png
09.05.20/KQ Background removal ('green screen'), beta & thus really ugly ...
12.05.20/KQ Round kernels shall improve green screen (a little bit), local file rss feed provided, alarm time introduced
13.05.20/KQ Subjective improvement: convex hull/bounding box (not so great ...)
15.05.20/KQ Idea: How about increasing 'ROI'? Yup, works! (One of my better inspirations ... remember: Put a hat on!)
bounding box turned off by default, alarm bug fixed, finally our own RSS feed works as well ...
Argument parsing improved
16.05.20/KQ More 3D elements now
17.05.20/KQ TrueType fonts integrated (via PIL/pillow)
19.05.20/KQ Permit background movie play (w/o sound)
21.05.20/KQ Introduced coordinates as parameters
22.05.20/KQ Blur background (from original webcam) added, minimized original stream embedding new
23.05.20/KQ Clock & date parameters (permitting minimal version)
25.05.20/KQ Modularized, re-factored, transparency effect
26.05.20/KQ Bug fixes, logo turning effect now optionally selectable
27.05.20/KQ Variable naming corrected
22.06.20/KQ Actual greenscreen function option (w/ auto-adaptation!) added (requires a 'defined' i.e. green (or blue) background!)
"""
import argparse
import numpy as np
import random
import sys
import datetime, time
import cv2
import feedparser # To install: Activate (cv) environment, then 'pip3 install feedparser'
import addimage
import background
# Command-line interface ------------------------------------------------------
# All options are optional except --webcam; several options are add-ons that
# only take effect in combination with another one (noted in their help text).
parser = argparse.ArgumentParser(
    prog='python3 funcam.py',
    # Usage string fixed: --greenscreen was missing; epilog typo "Dont't" fixed.
    usage="%(prog)s --webcam <n> [--clock <x> <y> <color>][--date <x> <y> <color> <scale>][--id <ID>][--rss <url>][--logo <x> <y> <w> <h> <s1> <s2> <png>][--turn][--alarm HH:MM][--altb <path>][--blur][--greenscreen][--minimize <x> <y> <w> <h>][--d3d][--ttf <font>][--movie <x> <y> <w> <h> <path>]",
    description='A stream modification utility',
    epilog="Don't forget to redirect stdout to the video device")
parser.add_argument('--webcam', type=int, choices=[0, 1, 2, 3, 4], nargs='?', help='Webcam index 0..4, will be used to access webcam device /dev/video<n>', required=True)
parser.add_argument('--clock', nargs=3, help='Show clock @x,y w/ <hexcolor>')
parser.add_argument('--date', nargs=4, help='Show date @x,y w/ <hexcolor> <float_scale>')
parser.add_argument('--id', nargs='?', help='Id data shown on the occasional nameplate')
parser.add_argument('--rss', nargs='?', help='RSS feed link')
parser.add_argument('--logo', nargs=7, help='Station logo @x,y w/ w,h, subtext lines <s1> & <s2>')
parser.add_argument('--turn', action='store_true', help='Turning effect for logo (add-on for --logo option)')
parser.add_argument('--alarm', nargs='?', help='Alarm time, format as HH:MM')
parser.add_argument('--altb', nargs='?', help='Alternate background image path')
parser.add_argument('--blur', action='store_true', help='Blur background (add-on for --altb option)')
parser.add_argument('--greenscreen', action='store_true', help='Greenscreen available (add-on for --altb option)')
parser.add_argument('--minimize', nargs=4, help='Display original stream resized @x,y w/ w,h (add-on for --altb option)')
parser.add_argument('--d3d', action='store_true', help='Draw w/ 3D elements')
parser.add_argument('--ttf', nargs='?', help='Use TrueType font <font> (Attn.: Scaling not used w/ ttf)')
parser.add_argument('--movie', nargs=5, help='Display a background movie @x,y w/ w,h')
args = parser.parse_args()
print("\n<<< funcam started ... >>>", file=sys.stderr)
# Hint: print() to stderr (fh#2) as the actual video stream goes to stdout (fh#1)!
print("OpenCV version : " + str(cv2.__version__), file=sys.stderr)  # For control purposes
# Prepare clock: position plus the hex color split into R/G/B components.
if args.clock is not None:
    clock_x = int(args.clock[0])        # e.g. 505
    clock_y = int(args.clock[1])        # e.g. 472
    clock_c = int(args.clock[2], 16)    # hex string "RRGGBB" -> int
    clock_r = clock_c >> 16             # extract red byte
    clock_g = (clock_c & 0xff00) >> 8   # extract green byte
    clock_b = clock_c & 0xff            # extract blue byte
# Prepare date: same color handling, plus a font scale factor.
if args.date is not None:
    date_x = int(args.date[0])          # e.g. 494
    date_y = int(args.date[1])          # e.g. 428
    date_c = int(args.date[2], 16)      # hex string "RRGGBB" -> int
    date_r = date_c >> 16
    date_g = (date_c & 0xff00) >> 8
    date_b = date_c & 0xff
    date_s = float(args.date[3])        # OpenCV font scaling, e.g. 0.8
# Some argument scanning for start --------------------------------------------
# Echo the effective configuration to stderr (stdout carries the video stream).
print("Using webcam on : /dev/video" + str(args.webcam), file=sys.stderr)
if args.clock:
    print(f"Clock display : x/y={args.clock[0]}/{args.clock[1]} color={clock_r},{clock_g},{clock_b}", file=sys.stderr)
if args.date:
    print(f"Date display : x/y={args.date[0]}/{args.date[1]} color={date_r},{date_g},{date_b} scale={date_s}", file=sys.stderr)
if args.id is not None:
    print(f"ID will be : '{args.id}'", file=sys.stderr)
if args.logo is not None:
    print(f"Logo image file used : '{args.logo[6]}' x/y={args.logo[0]}/{args.logo[1]} w/h={args.logo[2]}/{args.logo[3]} '{args.logo[4]}','{args.logo[5]}'", file=sys.stderr)
if args.turn:
    print("Logo turning effect : Enabled", file=sys.stderr)
if args.rss is not None:
    print(f"RSS feed : '{args.rss}'", file=sys.stderr)
if args.alarm is not None:
    # Generalized: split on ':' so both HH:MM and H:MM parse (was fixed slicing).
    alarm_parts = args.alarm.split(":")
    alarm_hh = int(alarm_parts[0])
    alarm_mm = int(alarm_parts[1])
    print("Alarm time : " + str(alarm_hh) + ":" + str(alarm_mm), file=sys.stderr)  # Alarm time
else:  # Default off: -1 never matches a real hour/minute
    alarm_hh = -1
    alarm_mm = -1
alternate_background = args.altb  # Alternate background image path, or None
if args.altb is not None:
    print(f"Alternate background (beta!): '{args.altb}'", file=sys.stderr)
if args.blur:
    print("Background blur : Enabled", file=sys.stderr)
if args.greenscreen:
    print("Background greenscreen : Enabled", file=sys.stderr)
if args.minimize is not None:
    print(f"Original stream minimized : x/y={args.minimize[0]}/{args.minimize[1]} w/h={args.minimize[2]}/{args.minimize[3]}", file=sys.stderr)
if args.d3d:
    print("3D element draw : Enabled", file=sys.stderr)
if args.ttf is not None:
    print("TrueType font : " + args.ttf + " (from /usr/share/fonts)", file=sys.stderr)
if args.movie is not None:
    print(f"Background movie : '{args.movie[4]}' x/y={args.movie[0]}/{args.movie[1]} w/h={args.movie[2]}/{args.movie[3]}", file=sys.stderr)
# Prepare scroll line message as an RSS feed. Currently, this is only read once upon start ...
if args.rss is not None:
    fparser = feedparser.parse(args.rss)
    print("Feed entries: " + str(len(fparser.entries)), file=sys.stderr)
    rss_n = len(fparser.entries)
    if rss_n < 1:
        msg = "+++ No/invalid feed data? "
    else:
        if rss_n > 10:  # Limit to the latter stuff only ...
            rss_n = 10
        msg = "+++ "
        for i in range(rss_n):
            entry = fparser.entries[i]
            try:
                # Atom-style timestamp (fixed +02:00 offset) -> show HH:MM
                stamp = datetime.datetime.strptime(entry.published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')
            except ValueError:  # Bugfix: catch only format mismatches (was a bare except)
                # RFC-822 style timestamp -> show DD.MM.YY
                stamp = datetime.datetime.strptime(entry.published, '%a, %d %b %Y %H:%M:%S %z').strftime('%d.%m.%y')
            msg = msg + stamp + " " + entry.title + " +++ "
    # OpenCV fonts lack umlauts -> transliterate the German characters
    msg = msg.translate(str.maketrans({"ä":"ae", "ö":"oe", "ü":"ue", "ß":"ss", "Ä":"Ae", "Ö":"Oe", "Ü":"Ue"}))
    lenmsg = len(msg)
    scroll_x = 500  # Scroll text background offset (start off the right edge)
# Prepare station emblem data -------------------------------------------------
if args.logo is not None:
    logo1 = cv2.imread(args.logo[6], -1)  # -1 = IMREAD_UNCHANGED: keep the alpha channel
    if logo1 is None:
        print("*** " + args.logo[6] + " not found!", file=sys.stderr)
        exit(-1)
    logo_x = int(args.logo[0])            # e.g. 12
    logo_y = int(args.logo[1])            # e.g. 12
    max_logo_width = int(args.logo[2])    # Initial (full) width
    logo_w = max_logo_width
    logo_h = int(args.logo[3])            # Height
    logo_sign = -1                        # Turning effect: start shrinking
    if not args.turn:
        # Static logo: resize once here; the turning effect resizes per frame instead.
        logo2 = cv2.resize(logo1, (logo_w, logo_h))  # w/h
def _load_overlay(path, size):
    """Load a PNG including its alpha channel and resize to (w, h); abort if missing."""
    img = cv2.imread(path, -1)  # -1 = IMREAD_UNCHANGED: keep alpha
    if img is None:
        print("*** " + path + " not found!", file=sys.stderr)
        exit(-1)
    return cv2.resize(img, size)  # w/h

# Prepare 3D background elements (was three copy-pasted load/check/resize runs)
if args.d3d:
    # Scroll line background
    img_whiteline3d = _load_overlay("whiteline3d.png", (500, 30))
    # Clock background
    img_white3d = _load_overlay("white3d.png", (640 - 500, 480 - 432))
    # Alarm (blink) background; color space adjust included (required!)
    img_red3d = _load_overlay("red3d.png", (640 - 500, 480 - 432))
# Optional TrueType font support ----------------------------------------------
if args.ttf is not None:
    # Pillow provides TrueType rendering that OpenCV's builtin fonts lack.
    # To install: activate the (cv) environment, then 'pip3 install pillow'
    from PIL import Image, ImageFont, ImageDraw
    # Fonts are expected under /usr/share/fonts (search for *.ttf).
    # Bold small fonts: Liberation Sans Narrow Bold, Nimbus Sans L Bold Condensed, Ubuntu Condensed Bold
    regularfont = ImageFont.truetype(args.ttf, 44)  # height used for the clock
    boldfont = ImageFont.truetype(args.ttf, 24)     # height used for the scroll line
# Prepare background movie ----------------------------------------------------
if args.movie is not None:
    movie_x = int(args.movie[0])  # e.g. 100
    movie_y = int(args.movie[1])  # e.g. 180
    movie_w = int(args.movie[2])  # e.g. 150
    movie_h = int(args.movie[3])  # e.g. 100
    movie = cv2.VideoCapture(args.movie[4])
    if not movie.isOpened():  # if not open already
        # Bugfix: VideoCapture.open() requires the source; the original called it bare.
        movie.open(args.movie[4])
else:
    movie = None  # Sentinel: no movie playback
# Prepare background 'greenscreen' --------------------------------------------
if alternate_background is not None:
    alternate_frame1 = cv2.imread(args.altb, -1)  # -1: keep alpha if present
    if alternate_frame1 is None:
        print("*** " + str(args.altb) + " not found!", file=sys.stderr)
        exit(-1)
    alternate_frame = cv2.resize(alternate_frame1, (640, 480))  # w/h, match stream size
# Prepare minimized original stream (picture-in-picture geometry)
if args.minimize is not None:
    mini_x = int(args.minimize[0])
    mini_y = int(args.minimize[1])
    mini_w = int(args.minimize[2])
    mini_h = int(args.minimize[3])
# User ID label data
label_state = False  # Label starts turned off
label_x = 0          # Initial coordinate offsets for labelling (random walk)
label_y = 0
# Actual video capture setup --------------------------------------------------
try:
    cap = cv2.VideoCapture(int(args.webcam))
    if not cap.isOpened():  # if not open already
        # Bugfix: VideoCapture.open() requires the device index; was called bare.
        cap.open(int(args.webcam))
    # Adjust channel resolution & use actual (named constants instead of magic 3/4)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
except Exception:  # was a bare except: don't swallow KeyboardInterrupt/SystemExit
    print("*** Can't open webcam!", file=sys.stderr)
    exit(-2)
if alternate_background is not None:  # Shall we provide an alternative background?
    print("1. Please pick your chair & get out of the picture together!\n2. Wait 5s for continuation message ...", file=sys.stderr)
    time.sleep(3)  # Time for operator to disappear!
    for _ in range(74):  # Warmup skip ... (~2s); was range(1, 75)
        ret, background_frame = cap.read()
        if not ret:
            print("*** Can't create background frame!", file=sys.stderr)
            exit(-3)
    if args.greenscreen:
        # Calibrate the RGB filter on the now operator-free background
        background.init_rgb_filter(background_frame)
    print("\0073. Thank you! You may enter webcam range again ...\n", file=sys.stderr)
print("- Start your webcam application & connect to the virtual camera\n"\
      "- Press [Ctrl][C] to abort ...", file=sys.stderr)
#--------------------------------------------------------------------------------------------------------------
# Main capture/compose loop: read one webcam frame, apply every enabled effect,
# then write the raw frame bytes to stdout (to be redirected to a video device).
while cap.isOpened():
    try:
        ret, frame = cap.read()
        if ret:
            # Current time components: alarm check + seconds drive the ID label cycle
            hh = int(time.strftime("%H", time.localtime()))
            mm = int(time.strftime("%M", time.localtime()))
            ss = int(time.strftime("%S", time.localtime()))
            # Now: The fun stuff! ------------------------------------------
            if alternate_background is not None:  # Green screen look-a-like pipeline
                if movie is not None:  # Play a movie inside the background?
                    try:
                        ret, fmovie = movie.read()  # Next frame ...
                        fmovie = cv2.resize(fmovie, (movie_w, movie_h))  # w/h
                        alternate_frame2 = alternate_frame.copy()
                        # Paste movie frame into its ROI only: y1:y2, x1:x2
                        alternate_frame2[movie_y:movie_y+movie_h, movie_x:movie_x+movie_w] = fmovie[0:movie_h, 0:movie_w]
                    except Exception:  # EOF (read/resize fails) -> loop the movie
                        movie.release()  # Free former
                        movie = cv2.VideoCapture(args.movie[4])  # Re-open again
                        if not movie.isOpened():
                            # Bugfix: open() requires the source path; was called bare.
                            movie.open(args.movie[4])
                        alternate_frame2 = alternate_frame.copy()
                else:  # No movie
                    alternate_frame2 = alternate_frame.copy()
                if args.minimize is not None:  # Picture-in-picture of the original stream
                    if args.blur:  # Blurring requires special processing first
                        if args.greenscreen:  # We have a defined 'greenscreen' in place
                            frame = background.do_alternate_greenscreen(frame, background_frame, alternate_frame2, True)
                        else:
                            frame = background.do_alternate_background(frame, background_frame, alternate_frame2, True)
                    fmini = cv2.resize(frame, (mini_w, mini_h))  # w/h
                    # Paste minimized stream into its ROI only: y1:y2, x1:x2
                    alternate_frame2[mini_y:mini_y+mini_h, mini_x:mini_x+mini_w] = fmini[0:mini_h, 0:mini_w]
                    frame = alternate_frame2
                else:  # Actual mask processing otherwise ...
                    if args.greenscreen:
                        frame = background.do_alternate_greenscreen(frame, background_frame, alternate_frame2, args.blur)
                    else:
                        frame = background.do_alternate_background(frame, background_frame, alternate_frame2, args.blur)
            # Emblem stencil (requires BGRA color space!)
            if args.logo is not None:
                if args.turn:
                    # Turning effect: oscillate the width between 4 px and full width
                    logo_w = logo_w + logo_sign
                    if logo_w <= 4:
                        logo_sign = 1   # Start increasing
                    if logo_w >= max_logo_width:
                        logo_sign = -1  # & shrinking again
                    logo2 = cv2.resize(logo1, (logo_w, logo_h))  # w/h
                # Overlay with transparency effect (alpha channel as mask)
                addimage.overlay_png(frame, logo_x + ((max_logo_width - logo_w)//2), logo_y, logo2[:, :, 0:3], logo2[:, :, 3])
            if args.d3d:
                # 3D-look scroll line background
                if args.rss is not None:
                    addimage.overlay_png(frame, 0, 450, img_whiteline3d[:, :, 0:3], img_whiteline3d[:, :, 3])
            frame4 = cv2.cvtColor(frame, cv2.COLOR_BGRA2RGB)  # Correct color space
            # Emblem subtext (two small lines under the logo)
            if args.logo is not None:
                cv2.putText(frame4, args.logo[4],                # e.g. " LIVE"
                            (logo_x - 8, logo_y + logo_h + 4),   # x,y
                            cv2.FONT_HERSHEY_DUPLEX,             # Font
                            0.4,                                 # Scaling
                            (255, 255, 255),                     # RGB
                            1,                                   # Thickness
                            cv2.LINE_AA)
                cv2.putText(frame4, args.logo[5],                # e.g. "Constance/Germany"
                            (logo_x - 8, logo_y + logo_h + 12),  # x,y
                            cv2.FONT_HERSHEY_DUPLEX,             # Font
                            0.4,                                 # Scaling
                            (255, 255, 255),                     # RGB
                            1,                                   # Thickness
                            cv2.LINE_AA)
            # Random walk text label, shown during the first 10 s of every minute
            if args.id is not None:
                if ss < 10:
                    label_state = True
                else:
                    label_state = False
                    label_x = 0  # Re-center the random walk for the next cycle
                    label_y = 0
                if label_state:
                    new_x_vector = random.randrange(-1, 2, 1)  # yields -1..1 (comment fixed)
                    new_y_vector = random.randrange(-1, 2, 1)  # yields -1..1
                    label_x = label_x + new_x_vector
                    label_y = label_y + new_y_vector
                    # Base (text) rectangle
                    cv2.rectangle(frame4,
                                  (16 + label_x, 356 + 12 + label_y),   # Top/left
                                  (352 + label_x, 416 + 12 + label_y),  # Bottom right
                                  (255, 255, 0),                        # Color
                                  -1)                                   # Thickness (-1 = filled)
                    # Link to center
                    if args.minimize is not None:  # Variable center?
                        center_x = mini_x + int(mini_w * 1/3)
                        center_y = mini_y + int(mini_h * 2/3)
                    else:  # No, fixed center!
                        center_x = 280
                        center_y = 330
                    cv2.line(frame4,
                             (156 + label_x, 360 + 12 + label_y),  # x1,y1
                             (center_x, center_y),                 # x2,y2
                             (255, 255, 0),                        # Color
                             2)                                    # Thickness
                    # Center cross hair (horizontal + vertical strokes)
                    cv2.line(frame4,
                             (center_x - 8, center_y),
                             (center_x + 8, center_y),
                             (255, 255, 255),
                             2)
                    cv2.line(frame4,
                             (center_x, center_y - 8),
                             (center_x, center_y + 8),
                             (255, 255, 255),
                             2)
                    # Text frame (inner black rectangle)
                    cv2.rectangle(frame4,
                                  (16 + 4 + label_x, 356 + 12 + 4 + label_y),   # Top/left
                                  (352 - 4 + label_x, 416 + 12 - 4 + label_y),  # Bottom right
                                  (0, 0, 0),
                                  -1)
                    # Actual text output
                    # NOTE(review): QT_FONT_NORMAL is 0, so this `or` resolves to
                    # QT_FONT_LIGHT — kept as-is to preserve the rendered font.
                    cv2.putText(frame4, args.id,
                                (32 + label_x, 400 + 12 + label_y),
                                (cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT),
                                1.4,
                                (255, 255, 255),
                                2,
                                cv2.LINE_AA)
            # Bottom scroll text (RSS ticker)
            if args.rss is not None:
                if not args.d3d:
                    # Plain white band as background (3D variant drawn earlier)
                    cv2.rectangle(frame4,
                                  (0, 450),         # Top/left
                                  (640, 480),       # Bottom right
                                  (255, 255, 255),  # Color
                                  -1)               # Thickness (-1 = filled)
                if args.ttf is not None:
                    # Unfortunately necessary round-trip: OpenCV -> PIL -> numpy
                    imgPil = Image.fromarray(frame4)
                    draw = ImageDraw.Draw(imgPil)
                    draw.text((scroll_x, 452), msg, font=boldfont, fill=(16, 16, 16, 0))
                    frame4 = np.array(imgPil)
                else:
                    # See NOTE above: `or` resolves to QT_FONT_LIGHT
                    cv2.putText(frame4, msg,
                                (scroll_x, 472),
                                (cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT),
                                0.8,
                                (0, 0, 0),
                                1,
                                cv2.LINE_AA)
                if not args.d3d:
                    # Separator lines around the ticker band
                    cv2.line(frame4, (498, 440), (498, 480), (200, 200, 200), 2)
                    cv2.line(frame4, (0, 440), (640, 440), (200, 200, 200), 1)
                scroll_x = scroll_x - 6  # Advance ticker
                if scroll_x < -lenmsg * 4.5 * 3:  # ~message width scrolled out
                    scroll_x = 500  # Restart off the right edge
            # Time output
            if args.clock is not None:
                t = time.strftime("%H:%M", time.localtime())  # Was: %H:%M:%S ...
                if not args.d3d:
                    # Flat clock background; red blink during the whole alarm minute
                    if hh == alarm_hh and mm == alarm_mm and (ss % 2) == 0:
                        cv2.rectangle(frame4,
                                      (clock_x - 5, clock_y - 40),          # Top/left
                                      (clock_x + (640-505), clock_y + 8),   # Bottom right
                                      (255, 0, 0),
                                      -1)
                    else:
                        cv2.rectangle(frame4,
                                      (clock_x - 5, clock_y - 40),
                                      (clock_x + (640-505), clock_y + 8),
                                      (160, 160, 160),
                                      -1)
                else:  # 3D clock background (color space adjustment inside overlay)
                    if hh == alarm_hh and mm == alarm_mm and (ss % 2) == 0:
                        addimage.overlay_png(frame4, clock_x - 5, clock_y - 40, img_red3d[:, :, 0:3], img_red3d[:, :, 3])
                        cv2.rectangle(frame4,
                                      (clock_x - 5, clock_y - 40),
                                      (clock_x + (638-505), clock_y + 8),
                                      (255, 0, 0),
                                      2)
                    else:
                        addimage.overlay_png(frame4, clock_x - 5, clock_y - 40, img_white3d[:, :, 0:3], img_white3d[:, :, 3])
                        cv2.rectangle(frame4,
                                      (clock_x - 5, clock_y - 40),
                                      (clock_x + (638-505), clock_y + 8),
                                      (80, 80, 80),
                                      2)
                if args.ttf is not None:
                    # Unfortunately necessary round-trip: OpenCV -> PIL -> numpy
                    imgPil = Image.fromarray(frame4)
                    draw = ImageDraw.Draw(imgPil)
                    draw.text((clock_x + 1, clock_y - 40), t, font=regularfont, fill=(clock_r, clock_g, clock_b, 0))
                    frame4 = np.array(imgPil)
                else:
                    cv2.putText(frame4, t,
                                (clock_x, clock_y),      # x,y (e.g. 505,472)
                                cv2.FONT_HERSHEY_DUPLEX,
                                1.4,
                                (clock_r, clock_g, clock_b),
                                4,
                                cv2.LINE_AA)
            # Date output (note: original uses ':' separators in the date)
            if args.date is not None:
                dd = time.strftime("%d:%m:%Y", time.localtime())
                cv2.putText(frame4, dd,
                            (date_x, date_y),
                            cv2.FONT_HERSHEY_DUPLEX,
                            date_s,
                            (date_r, date_g, date_b),
                            1,
                            cv2.LINE_AA)
            # End of fun stuff ... --------------------------------------------
            # Ref.: https://stackoverflow.com/questions/36579542/sending-opencv-output-to-vlc-stream
            # Write raw output (to be redirected to video device)
            sys.stdout.buffer.write(frame4.tobytes())
        else:
            print("*** Frame not read?!", file=sys.stderr)
            break
    except KeyboardInterrupt:  # Quit gracefully ...
        break
    except Exception as e:
        print("\n*** funcam aborted?!", file=sys.stderr)
        print(str(e), file=sys.stderr)
        break
# Release capture resources before exiting.
cap.release()
if movie is not None:
    movie.release()
print("\n<<< funcam terminated. >>>\n", file=sys.stderr)