Restructured project as a package
Some checks failed
ci/woodpecker/push/woodpecker Pipeline failed

Oscar Blue 2022-04-28 22:11:56 +01:00
parent 971cd62bb4
commit 484e481d88
27 changed files with 160 additions and 115 deletions

View file

@@ -1,24 +1,16 @@
# IMPORTS
import argparse
import math
import os
from os.path import abspath

import cv2
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import yaml
from skimage.exposure import is_low_contrast
from tqdm import tqdm

# local imports for filters
import autophotographer.filters.brightness.brightness as brightness
import autophotographer.filters.focusdetection.focusdetection as focusdetection
# GLOBAL VARIABLES
# accepted image formats
@@ -37,6 +29,13 @@ ignore_video = False
# FUNCTIONS
# load config file
def load_config(path=os.path.join(os.path.dirname(__file__), "./config.yml")):
"""
The load_config function loads a YAML configuration file from the specified path.
:param path=os.path.join(os.path.dirname(__file__): Used to Specify the path of the config.
:param "./config.yml"): Used to Specify the path to the config file.
:return: A dictionary of the config file.
"""
abs_path = os.path.abspath(path)
# check if file exists
@@ -50,11 +49,28 @@ def load_config(path=os.path.join(os.path.dirname(__file__), "./config.yml")):
            print("[ERRO] Please specify a file with extension '.yml' or '.yaml'.")
            quit()
    else:
        raise FileNotFoundError("[ERRO] Path does not exist")
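For reference, a minimal usage sketch of load_config; the explicit path below is hypothetical:

config = load_config()                        # reads the config.yml next to this script
config = load_config("/path/to/other.yml")    # or an explicitly supplied file
print(config["filters"])                      # e.g. the list of filters to run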
# load the correct filter function from filter name
def filter_to_function(imagefilter: str, paths: list) -> list:
    """
    The filter_to_function function applies the named filter to a list of image paths.

    :param imagefilter: Which filter to apply: "brightness", "contrast" or "focus".
        The matching threshold (brightness_thresh, contrast_thresh or focus_thresh)
        is taken from the config file or command line arguments.
    :param paths: A list of file paths to the images that will be filtered.
    :return: The filtered list of paths.
    """
    if imagefilter == "brightness":
        paths = filter_brightness(paths, brightness_thresh)
        print("[INFO] Filtering based on brightness...")
@@ -291,56 +307,57 @@ def display_images(paths, location):
    plt.savefig(location)

# parse command line arguments
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", type=os.path.abspath, required=True, nargs="+",
                        help="path to video or image folder")
    parser.add_argument("-c", "--config", type=os.path.abspath, help="path to config file")
    args = vars(parser.parse_args())

    # load in config file
    if args["config"] is not None:
        autophotoConf = load_config(args["config"])
    else:
        autophotoConf = load_config()

    # Load values for options
    if autophotoConf["brightness_options"]["threshold"] is not None:
        brightness_thresh = autophotoConf["brightness_options"]["threshold"]
    if autophotoConf["focus_options"]["threshold"] is not None:
        focus_thresh = autophotoConf["focus_options"]["threshold"]
    if autophotoConf["contrast_options"]["threshold"] is not None:
        contrast_thresh = autophotoConf["contrast_options"]["threshold"]
    if autophotoConf["filesize_options"]["threshold"] is not None:
        filesize_thresh = autophotoConf["filesize_options"]["threshold"]
    if autophotoConf["ignore_video"] is not None:
        ignore_video = autophotoConf["ignore_video"]

    paths = filter_paths(args["input"])
    print("[INFO] Loaded {} objects.".format(len(paths)))
    prior_paths = []
    path_diff = []

    # Order and selection of operations from config file
    if autophotoConf["filters"] is not None:
        # iterate over all chosen filters
        for imageFilter in autophotoConf["filters"]:
            prior_paths = paths
            # run given filter
            paths = filter_to_function(imageFilter, paths)
            path_diff = list(set(paths) - set(prior_paths)) + list(set(prior_paths) - set(paths))
            diff = len(prior_paths) - len(paths)
            # filename = "/src/fig" + "-" + str(imageFilter) + ".png"
            # display_images(path_diff, filename)
            # calculate set difference after filtering
            if diff == 0:
                print("[INFO] No images were filtered.")
            else:
                print("[INFO] Filtered {}/{} images via {} filtering.".format(
                    len(paths), len(prior_paths), imageFilter))

    if autophotoConf["CNNrank"]:
        print("[INFO] Running CNN ranking...")

View file

@@ -1,8 +1,10 @@
import argparse
import os
import pathlib
import sys

import cv2
# Process arguments
def parse_arguments(argv=None):

View file

@@ -1,6 +1,7 @@
import os

import torch
# https://pytorch.org/hub/pytorch_vision_resnet/
MEAN = [0.485, 0.456, 0.406]
STD = [0.229, 0.224, 0.225]
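These are the ImageNet statistics from the linked PyTorch Hub page; a typical way to consume them is a torchvision preprocessing pipeline along these lines (the 256/224 sizes are the usual ResNet defaults, assumed here):

from torchvision import transforms

preprocess = transforms.Compose([
    transforms.Resize(256),
    transforms.CenterCrop(224),
    transforms.ToTensor(),                     # HWC uint8 -> CHW float in [0, 1]
    transforms.Normalize(mean=MEAN, std=STD),  # the constants defined above
])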

View file

Can't render this file because it is too large.

View file

@@ -1,20 +1,23 @@
import os
from os.path import abspath
from pathlib import Path

import cv2
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from PIL import Image, ImageFile
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from torch.utils.data import DataLoader
from torchvision import transforms

from . import config
ImageFile.LOAD_TRUNCATED_IMAGES = True
datasetDir = "/datasets/"

View file

@@ -0,0 +1,2 @@
tensorArray.pt
testTensorArrayFile.pt

View file

@@ -1,18 +1,17 @@
import os
import time
from os.path import abspath

import config
import dataset
import matplotlib.pyplot as plt
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.models import resnet50
from tqdm import tqdm
# projectRoot = "/src/"
script_directory = os.path.dirname(__file__)

View file

@@ -0,0 +1,20 @@
import cv2
import numpy as np
def get_luminance_value(image):
"""
The get_luminance_value function takes an image as input and returns the luminance value of that image.
The function first resizes the image to a width of 20 pixels, then converts it to LAB color space,
and finally normalizes the L channel by dividing it by its maximum value. The function returns this normalized
L channel.
:param image: Used to Get the image that is being processed.
:return: The value of the luminance channel.
"""
width = 20
height = int(image.shape[0] * (width / image.shape[0]))
image = cv2.resize(image, (width, height))
L, A, B = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
L = L/np.max(L)
return np.mean(L)
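A short usage sketch (the image path is hypothetical):

if __name__ == "__main__":
    image = cv2.imread("example.jpg")  # hypothetical test image
    if image is not None:
        print("Mean normalised luminance: {:.3f}".format(get_luminance_value(image)))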

View file

@@ -1,10 +1,11 @@
import argparse
import os
from os.path import abspath

import cv2
import matplotlib.pyplot as plt
import numpy
import pandas
#parser = argparse.ArgumentParser()
#parser.add_argument('images', type=os.path.abspath, metavar='image-location', nargs='+',
@@ -44,6 +45,4 @@ def laplacian_variance_method():
    mean_lap_var = df["Laplacian Variance"].mean()
    df_not_blurred = df[df['Laplacian Variance'] > mean_lap_var]
    df_blurred = df[df['Laplacian Variance'] <= mean_lap_var]
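The per-image value used above is computed elsewhere in the script; the standard OpenCV recipe it relies on (grayscale, Laplacian, variance of the response) looks roughly like this (helper name assumed):

def laplacian_variance(path):
    # low variance of the Laplacian response indicates a blurry image
    image = cv2.imread(path)
    gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(gray, cv2.CV_64F).var()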

View file

@@ -1,14 +1,14 @@
import argparse
import datetime
import os
from os.path import abspath

import matplotlib.pyplot as plt
import torch
from torch import nn
from torchvision import transforms

from cnn import config, dataset
# set project root for fetching files using relative file paths
script_directory = os.path.dirname(__file__)

View file

@@ -1,10 +0,0 @@
import numpy as np
import cv2

def get_luminance_value(image):
    width = 20
    height = int(image.shape[0] * (width / image.shape[0]))
    image = cv2.resize(image, (width, height))
    L, A, B = cv2.split(cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
    L = L/np.max(L)
    return np.mean(L)

BIN
src/output/predict-plot-2022-3-23_13-20-21.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
src/output/predict-plot-2022-3-23_13-24-49.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
src/output/predict-plot-2022-3-23_13-27-30.png (Stored with Git LFS) Normal file

Binary file not shown.

BIN
src/output/predict-plot-2022-3-23_13-30-59.png (Stored with Git LFS) Normal file

Binary file not shown.