
Streamlined version w/ simple background substitution

master · kln · 11 months ago · commit 1c5769bb9a
6 changed files with 186 additions and 39 deletions
  1. README.md  +7  -1
  2. alternate_background.jpg  BIN
  3. emblem.png  BIN
  4. funcam.py  +137  -34
  5. getvirtualvideo.py  +32  -0
  6. virtcam.sh  +10  -4

README.md  +7  -1

@@ -3,10 +3,16 @@
The actual description can be found at
https://www.hacknology.de/projekt/2020/pimpedwebcam/
The file Tux.png can be retrieved from
The emblem.png can be retrieved from
https://upload.wikimedia.org/wikipedia/commons/a/af/Tux.png
(Take this one for now, I will add an original later ...)
As there has been confusion about the correct config.ini for akvcam,
I added a functioning one (store as /etc/akvcam/config.ini - w/ root rights!).
Also: You will need a 3rd gen. Intel i7/equivalent or better ...
11.05.20 New/improved features
Automatic virtual port detection (via getvirtualvideo.py)
Message translation, exception handling, arg passing improved, more general emblem.png introduced
Background removal ('green screen'), beta & thus can be turned off ...
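
For reference, the extended command-line interface added in this commit (webcam index, nameplate text, RSS feed URL, optional alternate background) is exercised at the end of virtcam.sh. A minimal standalone invocation might look as follows; the output target /dev/video4 is only an example and depends on which virtual output device akvcam creates on your system:

    # Sketch only: arguments are webcam index, nameplate text, RSS feed URL, alternate background image
    python3 funcam.py 0 "Expert (+++)" "https://www.heise.de/security/rss/news-atom.xml" alternate_background.jpg > /dev/video4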

alternate_background.jpg  BIN

Width: 640  |  Height: 480  |  Size: 44 KiB

emblem.png  BIN

Width: 265  |  Height: 314  |  Size: 12 KiB

funcam.py  +137  -34

@@ -9,6 +9,8 @@ History:
--------
03.05.20/KQ Initial version
06.05.20/KQ Release version
09.05.20/KQ Message translation, exception handling, arg passing improved, more general emblem.png
09.05.20/KQ Background removal ('green screen'), beta & thus really ugly ...
"""
import numpy as np
@@ -18,11 +20,36 @@ import datetime, time
import cv2
import feedparser # To install: Activate (cv) environment, then 'pip3 install feedparser'
print("funcam started ...",file=sys.stderr)
print("<<< funcam started ... >>>",file=sys.stderr)
# Hint: print() to stderr (fh#2) as the actual video stream goes to stdout (fh#1)!
# Use RSS feed for bottom scrolling text
d = feedparser.parse('https://www.heise.de/security/rss/news-atom.xml') # TODO: Adjust via parameter or settings file?
print("OpenCV version : "+str(cv2.__version__),file=sys.stderr) # For control purposes
# Some argument scanning for start
if len(sys.argv) > 1:
camera = int(sys.argv[1])
else:
camera = 0
print("Using webcam on : /dev/video"+str(camera),file=sys.stderr)
if len(sys.argv) > 2:
nameplate = sys.argv[2]
else:
nameplate = "Expert (+++)"
print("ID will be : \'"+nameplate+"\'",file=sys.stderr)
if len(sys.argv) > 3:
rssfeed = sys.argv[3]
else:
rssfeed = "https://www.heise.de/security/rss/news-atom.xml"
print("RSS feed : \'"+rssfeed+"\'",file=sys.stderr)
if len(sys.argv) > 4:
alternate_background = sys.argv[4]
print("Alternate background (beta!): \'"+alternate_background+"\'",file=sys.stderr)
else:
alternate_background = None
# Prepare scroll line message as an RSS feed. Currently, this is only read once upon start ...
''' # Example of an actual RSS feed
d = feedparser.parse(rssfeed)
dt0 = datetime.datetime.strptime(d.entries[2].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')
sRSS0 = dt0 + " " + d.entries[2].title + " +++ "
dt1 = datetime.datetime.strptime(d.entries[1].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')
@@ -30,36 +57,43 @@ sRSS1 = dt1 + " " + d.entries[1].title + " +++ "
dt2 = datetime.datetime.strptime(d.entries[0].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M')
sRSS2 = dt2 + " " + d.entries[0].title + " +++ "
msg = "+++ " + sRSS0 + sRSS1 + sRSS2
msg = msg.replace("ä","ae") # TODO: Replace by regex solution!
msg = msg.replace("ö","oe")
msg = msg.replace("ü","ue")
msg = msg.replace("ß","ss")
msg = msg.replace("Ä","Ae")
msg = msg.replace("Ö","Oe")
msg = msg.replace("Ü","Ue")
# msg = "+++ Reptiloids warn: hacKNology seeks world reign! +++" # For demo purposes ...
lenmsg = len(msg)
msg = msg.translate(str.maketrans({"ä":"ae", "ö":"oe", "ü":"ue", "ß":"ss", "Ä":"Ae", "Ö":"Oe", "Ü":"Ue"}))
'''
# Alternate reality ...
msg = "+++ Reptiloids warn: hacKNology seeks world reign! "\
"+++ National Enquirer: hacKNology may be part of deep state "\
"+++ Disclaimer: This IS fake news! (says a cretan) "
nameplate = "Expert (+++)" # Replace by your own ID data, if length varies adjust rectangle sizes (or provide automatic solution)
# TODO: Should be a script parameter, really!
lenmsg = len(msg)
scroll_x = 500 # Scroll text background offset
src1 = cv2.imread("Tux.png",-1) # TODO: Should become a script parameter as well!
# Prepare station emblem data
src1 = cv2.imread("emblem.png",-1)
if src1 is None:
print("Tux.png not found!",file=sys.stderr)
print("*** emblem.png not found!",file=sys.stderr)
exit(-1)
scr_width = 40 # Initial (full) width
scr_sign = -1 # Start shrinking
alpha = 0.5 # 0..1
alpha = 0.5 # 0.5 (0..1)
beta = (1.0 - alpha)
x_offset = 16
y_offset = 16
scroll_x = 500 # Scroll text background offset
# Prepare background 'greenscreen'
if alternate_background != None:
alternate_frame1 = cv2.imread(alternate_background,-1)
if alternate_frame1 is None:
print("*** "+str(alternate_background)+" not found!",file=sys.stderr)
exit(-1)
alternate_frame = cv2.resize(alternate_frame1, (640,480)) # w/h
#alternate_frame = cv2.cvtColor(alternate_frame2,cv2.COLOR_BGRA2RGB) # Correct color space
# User ID label data
current_state = False # Label starts turned off
current_x = 0 # Initial coordinate offsets for labelling
current_y = 0
#--------------------------------------------------------------------------------------------------------------
# Ref.: https://stackoverflow.com/questions/14063070/overlay-a-smaller-image-on-a-larger-image-python-opencv
def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
"""Overlay img_overlay on top of img at the position specified by
@@ -90,14 +124,73 @@ def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
for c in range(channels):
img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] +
alpha_inv * img[y1:y2, x1:x2, c])
#--------------------------------------------------------------------------------------------------------------
def do_alternate_background(current_frame,background_frame,alternate_frame):
"""Processing pipeline to insert an alternate background (in-place!)
modelled with help from https://medium.com/fnplus/blue-or-green-screen-effect-with-open-cv-chroma-keying-94d4a6ab2743
"""
delta_frame = cv2.absdiff(current_frame,background_frame) # Delta between background & current frame
lower_black = np.array([0, 0, 0])
upper_black = np.array([80, 80, 80])
mask = cv2.inRange(delta_frame, lower_black, upper_black) # Yields black&white
#sys.stdout.buffer.write(mask.tobytes())
# test next: hull = cv.convexHull(cnt)
# Attn.: We're operating on an inverse mask!
# dilate actually increases (white) background
# erode actually increases (black) foreground
kernel2 = np.ones((5,5),np.uint8) # Clear artefacts
mask = cv2.dilate(mask,kernel2,iterations=1)
#kernel = np.ones((63,63),np.uint8) # 31 ok
kernel = np.ones((9,9),np.uint8) # 31 ok
mask2 = cv2.erode(mask,kernel,iterations=7)
#sys.stdout.buffer.write(mask2.tobytes())
kernel2 = np.ones((3,3),np.uint8) # 9,23 ok
mask3 = cv2.dilate(mask2,kernel2,iterations=1)
#sys.stdout.buffer.write(mask3.tobytes())
masked_image = np.copy(current_frame)
masked_image[mask3 != 0] = [0, 0, 0] # From foreground take modified part (us!)
# Now add alternate background
alternate_frame2 = alternate_frame.copy()
alternate_frame2[mask3 == 0] = [0,0,0]
# Now mix both together
return masked_image + alternate_frame2
# Actual video capture loop
try:
cap = cv2.VideoCapture(camera)
if not cap.isOpened(): #if not open already
cap.open() #make sure, we will have data
# Adjust channel resolution & use actual
cap.set(3, 640)
cap.set(4, 480)
except:
print("*** Can't open webcam!",file=sys.stderr)
exit(-2)
print("Please take your chair & get out of the picture!\nWait 5s for continuation message ...",file=sys.stderr)
time.sleep(5) # Time for operator to disappear!
if alternate_background != None: # Shall we provide an alternative background?
for i in range(1,75): # Warmup skip ...
ret, background_frame = cap.read()
if ret!=True:
print("*** Can't create background frame!",file=sys.stderr)
exit(-3)
print("Thank you! You may enter webcam range again ...",file=sys.stderr)
cv2.imshow('Empty room', background_frame)
cv2.waitKey(0)
cv2.destroyAllWindows()
cap = cv2.VideoCapture(0) # Host: /dev/video0 VM: /dev/video1 (Logitech HD Webcam 525)
if not cap.isOpened(): #if not open already
cap.open() #make sure, we will have data
# Adjust channel resolution & use actual
cap.set(3, 640)
cap.set(4, 480)
while(cap.isOpened()):
try:
ret, frame = cap.read()
@@ -105,6 +198,9 @@ while(cap.isOpened()):
# Now: The fun stuff! ------------------------------------------
if alternate_background != None: # Green screen look-a-like pipeline
frame = do_alternate_background(frame,background_frame,alternate_frame)
# Emblem stencil (requires BGRA color space!)
scr_width = scr_width + scr_sign
if scr_width <= 4:
@@ -117,8 +213,9 @@ while(cap.isOpened()):
(x_offset+((60-scr_width)//2), y_offset),
src2[:, :, 3] / 255.0)
frame4 = cv2.cvtColor(frame.copy(),cv2.COLOR_BGRA2RGB) # Correct color space
frame4 = cv2.cvtColor(frame,cv2.COLOR_BGRA2RGB) # Correct color space
# Emblem subtext
cv2.putText(frame4, "hacKNology TV",
(4,y_offset+76), # x,y
(cv2.FONT_HERSHEY_DUPLEX), # Font
@@ -145,13 +242,13 @@ while(cap.isOpened()):
# Base (text) rectangle
cv2.rectangle(frame4,
(16+current_x,356+24+current_y), # Top/left
(352+current_x,416+24+current_y), # Bottom right
(16+current_x,356+12+current_y), # Top/left
(352+current_x,416+12+current_y), # Bottom right
(255,255,0), # Color
-1) # Thickness
# Link to center
cv2.line(frame4,
(156+current_x,360+24+current_y), # x1,y1
(156+current_x,360+12+current_y), # x1,y1
(280,330), # x2,y2
(255,255,0), # Color
2) # Thickness
@@ -168,13 +265,13 @@ while(cap.isOpened()):
2) # Thickness
# Text frame
cv2.rectangle(frame4,
(16+4+current_x,356+24+4+current_y), # Top/left
(352-4+current_x,416+24-4+current_y), # Bottom right
(16+4+current_x,356+12+4+current_y), # Top/left
(352-4+current_x,416+12-4+current_y), # Bottom right
(0,0,0), # Color
-1) # Thickness
# Actual text output
cv2.putText(frame4, s,
(32+current_x, 400+24+current_y), # x,y
(32+current_x, 400+12+current_y), # x,y
(cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT), # Font
1.4, # Scaling
(255, 255, 255), # RGB
@@ -247,13 +344,19 @@ while(cap.isOpened()):
# End of fun stuff ... --------------------------------------------
# Write raw output to video device
# Ref.: https://stackoverflow.com/questions/36579542/sending-opencv-output-to-vlc-stream
# Write raw output (to be redirected to video device)
sys.stdout.buffer.write(frame4.tobytes())
else:
print("*** Frame not read?!",file=sys.stderr)
break
except:
except KeyboardInterrupt: # Quit gracefully ...
break
except Exception as e:
print("\n*** funcam aborted?!",file=sys.stderr)
print(str(e),file=sys.stderr)
break
cap.release()
print("\nfuncam terminated.",file=sys.stderr)
print("\n<<< funcam terminated. >>>",file=sys.stderr)

getvirtualvideo.py  +32  -0

@@ -0,0 +1,32 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
getvirtualvideo.py
Pick 1st virtual video port
History:
--------
09.05.20/KQ Initial version
"""
import sys
# Some argument scanning for start
if len(sys.argv) > 1:
basename = sys.argv[1]
else:
basename = "videolist"
with open(basename+"1.txt") as l1:
r1 = l1.read()
with open(basename+"2.txt") as l1:
r2 = l1.read()
r3 = r2.replace(r1,"") # Skip previously existing entries ...
if len(r3) > 10:
r4 = r3[0:11] # /dev/video? (Att.: Single digit cam# only!)
print(r4) # Use output '/dev/video?'
else: # Will (& shall) fail
print("/dev/videoUnknown")

virtcam.sh  +10  -4

@@ -5,6 +5,7 @@
# History:
# --------
# 05.05.20/KQ Initial version
# 09.05.20/KQ Automatic virtual port detection (via getvirtualvideo.py)
#
# 1. Install akvcam virtual camera devices
# 2. Activate (cv) environment script
@@ -16,7 +17,9 @@
# 7. Remove akvcam kernel module (& thus the virtual camera devices)
#
echo "Installing video4 (output) / video5 (capture) virtual devices ..."
cd ~/cv
echo "Installing video<n> (output) / video<n+1> (capture) virtual devices ..."
ls /dev/video? >videolist1.txt
cd ~/akvcam/src
sudo modprobe videodev
sudo insmod akvcam.ko
@@ -26,9 +29,12 @@ echo "Starting 'stream modifier' application ..."
echo "Activating (cv) environment ..."
source ~/cv/bin/activate
cd ~/cv
ls /dev/video?
echo "*** For best video conference experience (currently) use chromium-browser ..."
python3 funcam.py > /dev/video4 # video4 is output device -> video4 is capture
ls /dev/video? >videolist2.txt
CAMPORT=`python3 getvirtualvideo.py videolist`
echo "1st virtual video port is: " $CAMPORT
echo "Actual webcam hardware assumed as: /dev/video0"
echo "*** For best video conference experience (currently, 5/2020) use chromium-browser ..."
python3 funcam.py 0 "Expert (+++)" "https://www.heise.de/security/rss/news-atom.xml" alternate_background.jpg > $CAMPORT # webcam assumed as /dev/video0
echo "Removing virtual (camera) devices ..."
sudo rmmod akvcam.ko
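
If no video shows up in the conference tool, a quick sanity check for the akvcam devices is v4l-utils (not part of this repo); the device number below is an example:

    # Optional check, assuming v4l-utils is installed
    v4l2-ctl --list-devices              # the akvcam output/capture pair should be listed
    v4l2-ctl -d /dev/video5 --all        # inspect the capture device exposed to the browser (number is an example)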

