|
|
@@ -9,6 +9,8 @@ History:
|
|
|
-------- |
|
|
|
03.05.20/KQ Initial version |
|
|
|
06.05.20/KQ Release version |
|
|
|
09.05.20/KQ Message translation, exception handling, arg passing improved, more general emblem.png |
|
|
|
09.05.20/KQ Background removal ('green screen'), beta & thus really ugly ... |
|
|
|
""" |
|
|
|
|
|
|
|
import numpy as np |
|
|
@@ -18,11 +20,36 @@ import datetime, time
|
|
|
import cv2 |
|
|
|
import feedparser # To install: Activate (cv) environment, then 'pip3 install feedparser' |
|
|
|
|
|
|
|
print("funcam started ...",file=sys.stderr) |
|
|
|
print("<<< funcam started ... >>>",file=sys.stderr) |
|
|
|
# Hint: print() to stderr (fh#2) as the actual video stream goes to stdout (fh#1)! |
|
|
|
|
|
|
|
# Use RSS feed for bottom scrolling text |
|
|
|
d = feedparser.parse('https://www.heise.de/security/rss/news-atom.xml') # TODO: Adjust via parameter or settings file? |
|
|
|
print("OpenCV version : "+str(cv2.__version__),file=sys.stderr) # For control purposes |
|
|
|
|
|
|
|
# Some argument scanning for start |
|
|
|
if len(sys.argv) > 1: |
|
|
|
camera = int(sys.argv[1]) |
|
|
|
else: |
|
|
|
camera = 0 |
|
|
|
print("Using webcam on : /dev/video"+str(camera),file=sys.stderr) |
|
|
|
if len(sys.argv) > 2: |
|
|
|
nameplate = sys.argv[2] |
|
|
|
else: |
|
|
|
nameplate = "Expert (+++)" |
|
|
|
print("ID will be : \'"+nameplate+"\'",file=sys.stderr) |
|
|
|
if len(sys.argv) > 3: |
|
|
|
rssfeed = sys.argv[3] |
|
|
|
else: |
|
|
|
rssfeed = "https://www.heise.de/security/rss/news-atom.xml" |
|
|
|
print("RSS feed : \'"+rssfeed+"\'",file=sys.stderr) |
|
|
|
if len(sys.argv) > 4: |
|
|
|
alternate_background = sys.argv[4] |
|
|
|
print("Alternate background (beta!): \'"+alternate_background+"\'",file=sys.stderr) |
|
|
|
else: |
|
|
|
alternate_background = None |
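# Usage sketch (the script and image file names here are only examples, not taken from this file):
#   python3 funcam.py [camera_index] [nameplate] [rss_feed_url] [alternate_background_image]
#   e.g.  python3 funcam.py 0 "Expert (+++)" https://www.heise.de/security/rss/news-atom.xml beach.png
# All four arguments are positional and optional, so a later one can only be given together with the earlier ones.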
|
|
|
|
|
|
|
# Prepare the scroll line message from an RSS feed. Currently, this is only read once at startup ...
|
|
|
''' # Example of an actual RSS feed |
|
|
|
d = feedparser.parse(rssfeed) |
|
|
|
dt0 = datetime.datetime.strptime(d.entries[2].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M') |
|
|
|
sRSS0 = dt0 + " " + d.entries[2].title + " +++ " |
|
|
|
dt1 = datetime.datetime.strptime(d.entries[1].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M') |
|
|
@@ -30,36 +57,43 @@ sRSS1 = dt1 + " " + d.entries[1].title + " +++ "
|
|
|
dt2 = datetime.datetime.strptime(d.entries[0].published, '%Y-%m-%dT%H:%M:%S+02:00').strftime('%H:%M') |
|
|
|
sRSS2 = dt2 + " " + d.entries[0].title + " +++ " |
|
|
|
msg = "+++ " + sRSS0 + sRSS1 + sRSS2 |
|
|
|
msg = msg.replace("ä","ae") # TODO: Replace by regex solution! |
|
|
|
msg = msg.replace("ö","oe") |
|
|
|
msg = msg.replace("ü","ue") |
|
|
|
msg = msg.replace("ß","ss") |
|
|
|
msg = msg.replace("Ä","Ae") |
|
|
|
msg = msg.replace("Ö","Oe") |
|
|
|
msg = msg.replace("Ü","Ue") |
|
|
|
# msg = "+++ Reptiloids warn: hacKNology seeks world reign! +++" # For demo purposes ... |
|
|
|
lenmsg = len(msg) |
|
|
|
msg = msg.translate(str.maketrans({"ä":"ae", "ö":"oe", "ü":"ue", "ß":"ss", "Ä":"Ae", "Ö":"Oe", "Ü":"Ue"})) |
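# (A single translate() with this mapping does in one pass what the chain of replace() calls above needs seven statements for.)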
|
|
|
''' |
|
|
|
# Alternate reality ... |
|
|
|
msg = "+++ Reptiloids warn: hacKNology seeks world reign! "\ |
|
|
|
"+++ National Enquirer: hacKNology may be part of deep state "\ |
|
|
|
"+++ Disclaimer: This IS fake news! (says a cretan) " |
|
|
|
|
|
|
|
nameplate = "Expert (+++)" # Replace by your own ID data, if length varies adjust rectangle sizes (or provide automatic solution) |
|
|
|
# TODO: Should be a script parameter, really! |
|
|
|
lenmsg = len(msg) |
|
|
|
scroll_x = 500 # Scroll text background offset |
|
|
|
|
|
|
|
src1 = cv2.imread("Tux.png",-1) # TODO: Should become a script parameter as well! |
|
|
|
# Prepare station emblem data |
|
|
|
src1 = cv2.imread("emblem.png",-1) |
|
|
|
if src1 is None: |
|
|
|
print("Tux.png not found!",file=sys.stderr) |
|
|
|
print("*** emblem.png not found!",file=sys.stderr) |
|
|
|
exit(-1) |
|
|
|
scr_width = 40 # Initial (full) width |
|
|
|
scr_sign = -1 # Start shrinking |
|
|
|
alpha = 0.5 # 0..1 |
|
|
|
alpha = 0.5 # 0.5 (0..1) |
|
|
|
beta = (1.0 - alpha) |
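# Complementary blend weights (alpha + beta == 1.0), presumably meant for an addWeighted-style mix that lies outside this hunk.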
|
|
|
x_offset = 16 |
|
|
|
y_offset = 16 |
|
|
|
|
|
|
|
scroll_x = 500 # Scroll text background offset |
|
|
|
# Prepare background 'greenscreen' |
|
|
|
if alternate_background is not None:
|
|
|
alternate_frame1 = cv2.imread(alternate_background,-1) |
|
|
|
if alternate_frame1 is None: |
|
|
|
print("*** "+str(alternate_background)+" not found!",file=sys.stderr) |
|
|
|
exit(-1) |
|
|
|
alternate_frame = cv2.resize(alternate_frame1, (640,480)) # w/h |
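# Resized to match the 640x480 capture resolution requested from the webcam further down.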
|
|
|
#alternate_frame = cv2.cvtColor(alternate_frame2,cv2.COLOR_BGRA2RGB) # Correct color space |
|
|
|
|
|
|
|
# User ID label data |
|
|
|
current_state = False # Label starts turned off |
|
|
|
current_x = 0 # Initial coordinate offsets for labelling |
|
|
|
current_y = 0 |
|
|
|
|
|
|
|
#-------------------------------------------------------------------------------------------------------------- |
|
|
|
# Ref.: https://stackoverflow.com/questions/14063070/overlay-a-smaller-image-on-a-larger-image-python-opencv |
|
|
|
def overlay_image_alpha(img, img_overlay, pos, alpha_mask): |
|
|
|
"""Overlay img_overlay on top of img at the position specified by |
|
|
@@ -90,14 +124,73 @@ def overlay_image_alpha(img, img_overlay, pos, alpha_mask):
|
|
|
for c in range(channels): |
|
|
|
img[y1:y2, x1:x2, c] = (alpha * img_overlay[y1o:y2o, x1o:x2o, c] + |
|
|
|
alpha_inv * img[y1:y2, x1:x2, c]) |
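# Per channel this is standard alpha compositing: out = alpha * overlay + (1 - alpha) * background,
# with alpha taken from the overlay's alpha channel (passed in scaled to 0..1 by the caller);
# alpha_inv is presumably 1.0 - alpha in the part of the function this hunk omits.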
|
|
|
#-------------------------------------------------------------------------------------------------------------- |
|
|
|
|
|
|
|
def do_alternate_background(current_frame,background_frame,alternate_frame): |
|
|
|
"""Processing pipeline to insert an alternate background (in-place!) |
|
|
|
modelled with help from https://medium.com/fnplus/blue-or-green-screen-effect-with-open-cv-chroma-keying-94d4a6ab2743 |
|
|
|
""" |
|
|
|
delta_frame = cv2.absdiff(current_frame,background_frame) # Delta between background & current frame |
|
|
|
|
|
|
|
lower_black = np.array([0, 0, 0]) |
|
|
|
upper_black = np.array([80, 80, 80]) |
|
|
|
mask = cv2.inRange(delta_frame, lower_black, upper_black) # Yields black&white |
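# cv2.inRange() returns a single-channel mask: 255 where the delta lies within [lower_black, upper_black]
# (pixels that barely changed, i.e. background), 0 everywhere else.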
|
|
|
#sys.stdout.buffer.write(mask.tobytes()) |
|
|
|
|
|
|
|
# Test next: hull = cv2.convexHull(cnt)
|
|
|
|
|
|
|
# Attn.: We're operating on an inverse mask! |
|
|
|
# dilate actually increases (white) background |
|
|
|
# erode actually increases (black) foreground |
|
|
|
kernel2 = np.ones((5,5),np.uint8) # Clear artefacts |
|
|
|
mask = cv2.dilate(mask,kernel2,iterations=1) |
|
|
|
|
|
|
|
#kernel = np.ones((63,63),np.uint8) # 31 ok |
|
|
|
kernel = np.ones((9,9),np.uint8) # 31 ok |
|
|
|
mask2 = cv2.erode(mask,kernel,iterations=7) |
|
|
|
#sys.stdout.buffer.write(mask2.tobytes()) |
|
|
|
|
|
|
|
kernel2 = np.ones((3,3),np.uint8) # 9,23 ok |
|
|
|
mask3 = cv2.dilate(mask2,kernel2,iterations=1) |
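# Net effect of the three morphology steps on this inverted mask (white = background, black = foreground):
# 1. small dilate -> grows white, wiping isolated black speckles (sensor noise)
# 2. heavy erode  -> shrinks white, i.e. fattens the black foreground silhouette and closes holes in it
# 3. small dilate -> grows white again slightly, tightening the silhouette edge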
|
|
|
#sys.stdout.buffer.write(mask3.tobytes()) |
|
|
|
|
|
|
|
masked_image = np.copy(current_frame) |
|
|
|
masked_image[mask3 != 0] = [0, 0, 0] # Blank out the (unchanged) background pixels; what survives is the moving foreground (us!)
|
|
|
|
|
|
|
# Now add alternate background |
|
|
|
alternate_frame2 = alternate_frame.copy() |
|
|
|
alternate_frame2[mask3 == 0] = [0,0,0] |
|
|
|
|
|
|
|
# Now mix both together |
|
|
|
return masked_image + alternate_frame2 |
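# masked_image and alternate_frame2 are complementary (every pixel is zeroed in exactly one of them),
# so plain addition just selects the surviving pixel per position; no overflow is possible because
# one summand is always 0.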
|
|
|
|
|
|
|
|
|
|
|
# Actual video capture loop |
|
|
|
try: |
|
|
|
cap = cv2.VideoCapture(camera) |
|
|
|
if not cap.isOpened(): # If not already open
|
|
|
cap.open(camera) # VideoCapture.open() needs the device index; make sure we will get data
|
|
|
# Adjust channel resolution & use actual |
|
|
|
cap.set(3, 640) |
|
|
|
cap.set(4, 480) |
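# Property ids 3 and 4 are cv2.CAP_PROP_FRAME_WIDTH and cv2.CAP_PROP_FRAME_HEIGHT;
# the driver may silently substitute the nearest resolution it actually supports.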
|
|
|
except: |
|
|
|
print("*** Can't open webcam!",file=sys.stderr) |
|
|
|
exit(-2) |
|
|
|
|
|
|
|
print("Please take your chair & get out of the picture!\nWait 5s for continuation message ...",file=sys.stderr) |
|
|
|
time.sleep(5) # Time for operator to disappear! |
|
|
|
if alternate_background is not None: # Shall we provide an alternative background?
|
|
|
for i in range(1,75): # Warmup skip ... |
|
|
|
ret, background_frame = cap.read() |
|
|
|
if not ret:
|
|
|
print("*** Can't create background frame!",file=sys.stderr) |
|
|
|
exit(-3) |
|
|
|
|
|
|
|
print("Thank you! You may enter webcam range again ...",file=sys.stderr) |
|
|
|
cv2.imshow('Empty room', background_frame) |
|
|
|
cv2.waitKey(0) |
|
|
|
cv2.destroyAllWindows() |
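# cv2.waitKey(0) above blocks until any key is pressed, so the operator can inspect the captured
# empty-room reference frame before the stream proper starts.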
|
|
|
|
|
|
|
cap = cv2.VideoCapture(0) # Host: /dev/video0 VM: /dev/video1 (Logitech HD Webcam 525) |
|
|
|
if not cap.isOpened(): # If not already open
|
|
|
cap.open(0) # VideoCapture.open() needs the device index; make sure we will get data
|
|
|
# Adjust channel resolution & use actual |
|
|
|
cap.set(3, 640) |
|
|
|
cap.set(4, 480) |
|
|
|
while(cap.isOpened()): |
|
|
|
try: |
|
|
|
ret, frame = cap.read() |
|
|
@@ -105,6 +198,9 @@ while(cap.isOpened()):
|
|
|
|
|
|
|
# Now: The fun stuff! ------------------------------------------ |
|
|
|
|
|
|
|
if alternate_background is not None: # Green-screen lookalike pipeline
|
|
|
frame = do_alternate_background(frame,background_frame,alternate_frame) |
|
|
|
|
|
|
|
# Emblem stencil (requires BGRA color space!)
|
|
|
scr_width = scr_width + scr_sign |
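# 'Breathing' emblem: the stencil width changes by 1 px per frame; scr_sign is presumably flipped
# when the limits checked below are reached (the flip itself lies outside this hunk).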
|
|
|
if scr_width <= 4: |
|
|
@@ -117,8 +213,9 @@ while(cap.isOpened()):
|
|
|
(x_offset+((60-scr_width)//2), y_offset), |
|
|
|
src2[:, :, 3] / 255.0) |
|
|
|
|
|
|
|
frame4 = cv2.cvtColor(frame.copy(),cv2.COLOR_BGRA2RGB) # Correct color space |
|
|
|
frame4 = cv2.cvtColor(frame,cv2.COLOR_BGRA2RGB) # Correct color space |
|
|
|
|
|
|
|
# Emblem subtext |
|
|
|
cv2.putText(frame4, "hacKNnology TV", |
|
|
|
(4,y_offset+76), # x,y |
|
|
|
(cv2.FONT_HERSHEY_DUPLEX), # Font |
|
|
@@ -145,13 +242,13 @@ while(cap.isOpened()):
|
|
|
|
|
|
|
# Base (text) rectangle |
|
|
|
cv2.rectangle(frame4, |
|
|
|
(16+current_x,356+24+current_y), # Top/left |
|
|
|
(352+current_x,416+24+current_y), # Bottom right |
|
|
|
(16+current_x,356+12+current_y), # Top/left |
|
|
|
(352+current_x,416+12+current_y), # Bottom right |
|
|
|
(255,255,0), # Color |
|
|
|
-1) # Thickness (-1 = filled)
|
|
|
# Link to center |
|
|
|
cv2.line(frame4, |
|
|
|
(156+current_x,360+24+current_y), # x1,y1 |
|
|
|
(156+current_x,360+12+current_y), # x1,y1 |
|
|
|
(280,330), # x2,y2 |
|
|
|
(255,255,0), # Color |
|
|
|
2) # Thickness |
|
|
@@ -168,13 +265,13 @@ while(cap.isOpened()):
|
|
|
2) # Thickness |
|
|
|
# Text frame |
|
|
|
cv2.rectangle(frame4, |
|
|
|
(16+4+current_x,356+24+4+current_y), # Top/left |
|
|
|
(352-4+current_x,416+24-4+current_y), # Bottom right |
|
|
|
(16+4+current_x,356+12+4+current_y), # Top/left |
|
|
|
(352-4+current_x,416+12-4+current_y), # Bottom right |
|
|
|
(0,0,0), # Color |
|
|
|
-1) # Thickness (-1 = filled)
|
|
|
# Actual text output |
|
|
|
cv2.putText(frame4, s, |
|
|
|
(32+current_x, 400+24+current_y), # x,y |
|
|
|
(32+current_x, 400+12+current_y), # x,y |
|
|
|
(cv2.QT_FONT_NORMAL or cv2.QT_FONT_LIGHT), # Font (Python 'or' returns the first truthy operand, so only one of these constants takes effect)
|
|
|
1.4, # Scaling |
|
|
|
(255, 255, 255), # RGB |
|
|
@@ -247,13 +344,19 @@ while(cap.isOpened()):
|
|
|
|
|
|
|
# End of fun stuff ... -------------------------------------------- |
|
|
|
|
|
|
|
# Write raw output to video device |
|
|
|
# Ref.: https://stackoverflow.com/questions/36579542/sending-opencv-output-to-vlc-stream |
|
|
|
# Write raw output (to be redirected to video device) |
|
|
|
sys.stdout.buffer.write(frame4.tobytes()) |
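# Usage sketch (device path, file name and frame rate are assumptions, not taken from this file):
# the raw RGB frames on stdout can be piped e.g. into ffmpeg and forwarded to a v4l2loopback device:
#   python3 funcam.py 0 "Expert (+++)" | ffmpeg -f rawvideo -pixel_format rgb24 \
#       -video_size 640x480 -framerate 25 -i - -f v4l2 /dev/video2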
|
|
|
else: |
|
|
|
print("*** Frame not read?!",file=sys.stderr) |
|
|
|
break |
|
|
|
except: |
|
|
|
except KeyboardInterrupt: # Quit gracefully ... |
|
|
|
break |
|
|
|
except Exception as e: |
|
|
|
print("\n*** funcam aborted?!",file=sys.stderr) |
|
|
|
print(str(e),file=sys.stderr) |
|
|
|
break |
|
|
|
|
|
|
|
cap.release() |
|
|
|
print("\nfuncam terminated.",file=sys.stderr) |
|
|
|
print("\n<<< funcam terminated. >>>",file=sys.stderr) |
|
|
|
|