Added project outline
parent 47502307b0
commit 0e477519fe
10 changed files with 6851 additions and 0 deletions
2655 docs/project-outline/IEEEannot.bst (Normal file)
File diff suppressed because it is too large
2474 docs/project-outline/IEEEannotU.bst (Normal file)
File diff suppressed because it is too large
1364 docs/project-outline/authordate2annot.bst (Normal file)
File diff suppressed because it is too large
111 docs/project-outline/mmp.bib (Normal file)
@@ -0,0 +1,111 @@

@article{dcnn,
  title = {Photo {Aesthetics} {Analysis} via {DCNN} {Feature} {Encoding}},
  volume = {19},
  issn = {1941-0077},
  doi = {10.1109/TMM.2017.2687759},
  abstract = {We propose an automatic framework for quality assessment of a photograph as well as analysis of its aesthetic attributes. In contrast to the previous methods that rely on manually designed features to account for photo aesthetics, our method automatically extracts such features using a pretrained deep convolutional neural network (DCNN). To make the DCNN-extracted features more suited to our target tasks of photo quality assessment and aesthetic attribute analysis, we propose a novel feature encoding scheme, which supports vector machines-driven sparse restricted Boltzmann machines, which enhances sparseness of features and discrimination between target classes. Experimental results show that our method outperforms the current state-of-the-art methods in automatic photo quality assessment, and gives aesthetic attribute ratings that can be used for photo editing. We demonstrate that our feature encoding scheme can also be applied to general object classification task to achieve performance gains.},
  number = {8},
  journal = {IEEE Transactions on Multimedia},
  author = {Lee, Hui-Jin and Hong, Ki-Sang and Kang, Henry and Lee, Seungyong},
  month = aug,
  year = {2017},
  note = {Conference Name: IEEE Transactions on Multimedia},
  keywords = {Aesthetic attributes, deep convolutional neural network (DCNN), Encoding, feature encoding, Feature extraction, Mathematical model, Neural networks, photo aesthetics, Quality assessment, restricted Boltzmann machines, Support vector machines, Training},
  pages = {1921--1932},
  annote = {This paper discusses the use of a Convolutional Neural Network to predict how aesthetic a given picture is.},
  file = {IEEE Xplore Abstract Record:/home/noble/Zotero/storage/E6YUFLQE/7886320.html:text/html},
}

@inproceedings{ava_paper,
  title = {{AVA}: {A} large-scale database for aesthetic visual analysis},
  shorttitle = {{AVA}},
  doi = {10.1109/CVPR.2012.6247954},
  abstract = {With the ever-expanding volume of visual content available, the ability to organize and navigate such content by aesthetic preference is becoming increasingly important. While still in its nascent stage, research into computational models of aesthetic preference already shows great potential. However, to advance research, realistic, diverse and challenging databases are needed. To this end, we introduce a new large-scale database for conducting Aesthetic Visual Analysis: AVA. It contains over 250,000 images along with a rich variety of meta-data including a large number of aesthetic scores for each image, semantic labels for over 60 categories as well as labels related to photographic style. We show the advantages of AVA with respect to existing databases in terms of scale, diversity, and heterogeneity of annotations. We then describe several key insights into aesthetic preference afforded by AVA. Finally, we demonstrate, through three applications, how the large scale of AVA can be leveraged to improve performance on existing preference tasks.},
  booktitle = {2012 {IEEE} {Conference} on {Computer} {Vision} and {Pattern} {Recognition}},
  author = {Murray, Naila and Marchesotti, Luca and Perronnin, Florent},
  month = jun,
  year = {2012},
  note = {ISSN: 1063-6919},
  keywords = {Communities, Image color analysis, Semantics, Social network services, Visual databases, Visualization},
  pages = {2408--2415},
  annote = {A paper that discusses the creation of the AVA dataset. It is composed of 250,000 images collected from a photography competition website, along with grades for aesthetic features.},
  file = {IEEE Xplore Abstract Record:/home/noble/Zotero/storage/HE3EFVJV/6247954.html:text/html},
}

@misc{AADB,
  title = {Photo {Aesthetics} {Ranking} {Network} with {Attributes} and {Content} {Adaptation}},
  url = {https://github.com/aimerykong/deepImageAestheticsAnalysis},
  abstract = {ECCV2016 - fine-grained photo aesthetics rating with interpretability},
  urldate = {2022-02-11},
  author = {Kong, Shu},
  month = jan,
  year = {2022},
  note = {original-date: 2016-06-05T06:08:10Z},
  annote = {A tool that uses the AADB dataset to predict the aesthetics of a given picture using a CNN.},
}

@misc{pytorch,
  title = {{PyTorch}},
  url = {https://www.pytorch.org},
  abstract = {An open source machine learning framework that accelerates the path from research prototyping to production deployment.},
  language = {en},
  urldate = {2022-02-11},
  author = {{Adam Paszke} and {Sam Gross} and {Soumith Chintala} and {Gregory Chanan}},
  annote = {An open-source machine learning framework developed by Facebook (based on the Torch library).},
  file = {Snapshot:/home/noble/Zotero/storage/HPSFJGU3/pytorch.org.html:text/html},
}

@misc{opencv,
  title = {{OpenCV}},
  url = {https://opencv.org},
  abstract = {OpenCV provides a real-time optimized Computer Vision library, tools, and hardware. It also supports model execution for Machine Learning (ML) and Artificial Intelligence (AI).},
  language = {en-US},
  urldate = {2022-02-11},
  journal = {OpenCV},
  author = {{Intel Corporation} and {Willow Garage} and {Itseez}},
  annote = {An open source library used for real-time computer vision.},
  file = {Snapshot:/home/noble/Zotero/storage/JYX6KIPC/opencv.org.html:text/html},
}

@misc{ava_downloader,
  title = {{AVA} {Dataset}},
  url = {https://github.com/imfing/ava_downloader},
  abstract = {:arrow\_double\_down: Download AVA dataset (A Large-Scale Database for Aesthetic Visual Analysis)},
  urldate = {2022-02-11},
  author = {Fing},
  month = feb,
  year = {2022},
  note = {original-date: 2016-11-13T02:20:32Z},
  keywords = {aesthetic-visual-analysis, ava, computer-vision, dataset, python},
  annote = {A project with tools required to build the AVA dataset.},
}

@misc{image_quality_assessment,
  title = {Image {Quality} {Assessment}},
  copyright = {Apache-2.0},
  url = {https://github.com/idealo/image-quality-assessment},
  abstract = {Convolutional Neural Networks to predict the aesthetic and technical quality of images.},
  urldate = {2022-02-11},
  publisher = {idealo},
  month = feb,
  year = {2022},
  note = {original-date: 2018-06-12T14:46:09Z},
  keywords = {aws, computer-vision, convolutional-neural-networks, deep-learning, e-commerce, idealo, image-quality-assessment, keras, machine-learning, mobilenet, neural-network, nima, tensorflow},
  annote = {An open source tool developed by Idealo which uses a CNN to predict the aesthetic value of a given image.},
}

@misc{tensorflow,
  title = {{TensorFlow}},
  url = {https://www.tensorflow.org/},
  urldate = {2022-02-11},
  author = {{Google Brain Team}},
  annote = {Open-source library for machine learning and AI developed by Google.},
}

@misc{flutter,
  title = {Flutter - {Build} apps for any screen},
  url = {https://flutter.dev/},
  abstract = {Flutter transforms the entire app development process. Build, test, and deploy beautiful mobile, web, desktop, and embedded apps from a single codebase.},
  urldate = {2022-02-11},
}
130 docs/project-outline/mmpv2.sty (Normal file)
@@ -0,0 +1,130 @@
%% LaTeX class to write Major/Minor projects for the Computer Science Department,
%% Aberystwyth University.
%%
%% Written by Neil Taylor
%% Comments and bugs to nst@aber.ac.uk
%%
%% See the accompanying file MMP_ProgressReport_example.tex for an example on how to
%% use it.
%%
\ProvidesPackage{mmpv2}

\usepackage[a4paper,margin=2.5cm,nohead, headheight=3cm, headsep=20pt]{geometry}

\usepackage{graphicx} %to allow images to be imported see example at end of template

\newcommand{\is}{\hspace*{0.2in}} %little indent space

\newcommand{\mmpdocdate}{\today}

\newcommand{\documentdate}[1]{
  \renewcommand{\mmpdocdate}{#1}
}

\newcommand{\name}[1]{
  \newcommand{\showname}{#1}
}

\newcommand{\userid}[1]{
  \newcommand{\showuserid}{#1}
}

\newcommand{\supervisor}[1]{
  \newcommand{\showsupervisor}{#1}
}

\newcommand{\supervisorid}[1]{
  \newcommand{\showsupervisorid}{#1}
}

\newcommand{\projecttitle}[1]{
  \newcommand{\showprojecttitle}{#1}
}

\newcommand{\reporttitle}[1]{
  \newcommand{\showreporttitle}{#1}
}

\newcommand{\projecttitlememoir}[1]{
  \newcommand{\showprojecttitlememoir}{#1}
}

\newcommand{\version}[1]{
  \newcommand{\showversion}{#1}
}

\newcommand{\docstatus}[1]{
  \newcommand{\showdocstatus}{#1}
}

\newcommand{\degreeschemecode}[1]{
  \newcommand{\showdegreeschemecode}{#1}
}

\newcommand{\degreeschemename}[1]{
  \newcommand{\showdegreeschemename}{#1}
}

\newcommand{\modulecode}[1]{
  \newcommand{\showmodulecode}{#1}
}

\newcommand{\helv}{
  %bold, 18point, 21point line spacing
  \fontfamily{phv}\fontseries{b}\fontsize{18}{21}\selectfont
}

%builds the title page and starts page numbering
\newcommand{\mmp}{
  \pagestyle{fancy}
  \thispagestyle{empty} %no headers/footers on title page
  %
  \begin{center}
  \parbox{\textwidth}{
  \begin{center} \helv
  \showprojecttitle
  \end{center}
  }
  %
  \vspace{0.3in}\\
  %
  \fontsize{10}{12}
  \selectfont
  \begin{tabular}[t]{ll}
  \hline
  \\
  Report Name & \parbox{4in}{\showreporttitle}\\
  Author (User Id) & \parbox{4in}{\showname \hspace*{0.05in}(\showuserid)}\\
  Supervisor (User Id) & \showsupervisor \hspace*{0.05in}(\showsupervisorid)\\
  \\
  Module & \parbox{4in}{\showmodulecode}\\
  Degree Scheme & \showdegreeschemecode \hspace*{0.05in}(\showdegreeschemename)\\
  \\
  Date & \mmpdocdate\\
  Revision & \showversion\\
  Status & \showdocstatus\\
  \\
  \hline
  \vspace{0.1in}
  \end{tabular}
  \end{center}
  %
  \pagebreak
  \newpage
  \makeheaders
  \setcounter{page}{1}
  \normalsize
}

%==============================================================================
%header and footer information
%==============================================================================
\newcommand{\makeheaders}{
  \fancyhead{}
  %Right on Odd numbered pages and Left on Even numbered pages
  \fancyhead[LO,RE]{{\showreporttitle} - {\showversion} ({\showdocstatus})}
  \fancyhead[RO,LE]{{\showname} ({\showuserid})}
  \fancyfoot{}
  \fancyfoot[RO,LE]{{\thepage} of {\pageref{LastPage}}}
  \fancyfoot[LO,RE]{\showprojecttitlememoir}
  \renewcommand{\headrulewidth}{0.0pt}
  \renewcommand{\footrulewidth}{0.0pt}
}
BIN docs/project-outline/project-outline-osp1.pdf (Normal file)
Binary file not shown.
117 docs/project-outline/project-outline-osp1.tex (Normal file)
@@ -0,0 +1,117 @@
\documentclass[11pt,fleqn,twoside]{article}
\usepackage{makeidx}
\makeindex
\usepackage{palatino} %or {times} etc
\usepackage{plain} %bibliography style
\usepackage{amsmath} %math fonts - just in case
\usepackage{amsfonts} %math fonts
\usepackage{amssymb} %math fonts
\usepackage{lastpage} %for footer page numbers
\usepackage{fancyhdr} %header and footer package
\usepackage{mmpv2}
%\usepackage{url}
\usepackage{hyperref}

% the following packages are used for citations - You only need to include one.
%
% Use the cite package if you are using the numeric style (e.g. IEEEannot).
% Use the natbib package if you are using the author-date style (e.g. authordate2annot).
% Only use one of these and comment out the other one.
\usepackage{cite}
%\usepackage{natbib}

\begin{document}

\name{Oscar Pocock}
\userid{osp1}
\projecttitle{Autophotographer: aesthetic judgements from image data}
\projecttitlememoir{Autophotographer} %same as the project title or abridged version for page header
\reporttitle{Project Outline}
\version{1.0}
\docstatus{Release} % change to Release when you are ready to submit your document
\modulecode{CS39440}
\degreeschemecode{G401}
\degreeschemename{Computer Science (with integrated year in industry)}
\supervisor{Hannah Dee} % e.g. Neil Taylor
\supervisorid{hmd1} % e.g. nst

%optional - comment out next line to use current date for the document
%\documentdate{8th February 2022}
\mmp

%\setcounter{tocdepth}{3} %set required number of levels in table of contents

%==============================================================================
\section{Project description}
%==============================================================================

The main aim of the ``Autophotographer'' project is to develop a program which will select aesthetic pictures from a given video file. The user will give the program a video file; the program will then process the file and select the most aesthetic frames using data analysis and machine learning techniques.\\

The program will be written in Python and will utilise computer vision and machine learning libraries (OpenCV\cite{opencv}, PyTorch\cite{pytorch}, TensorFlow\cite{tensorflow}) to help select the most aesthetic frames in the last stage of the process. The program will initially be a CLI; if there is time, I would like to develop a cross-platform GUI in something like Flutter\cite{flutter} to improve accessibility.\\

A Convolutional Neural Network (CNN) will be trained with an existing dataset (AVA\cite{ava_paper} or AADB\cite{AADB}) and used to predict and select the most aesthetic frames based on the strength of certain aesthetic features detected in the image. The CNN will only be applied once the original set of input frames has been reduced using conventional, non-machine-learning techniques.\\
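
As an illustration only (not a final design), the two-stage selection described above might be sketched in Python roughly as follows, assuming OpenCV for frame decoding and a caller-supplied \texttt{score\_frame} function that wraps the trained CNN:

\begin{verbatim}
import cv2

def sharpness(frame):
    # Variance of the Laplacian: a cheap, non-ML sharpness measure used
    # to discard blurry frames before the CNN is ever involved.
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    return cv2.Laplacian(grey, cv2.CV_64F).var()

def select_frames(video_path, score_frame, keep=5, min_sharpness=100.0):
    # score_frame is a placeholder for the trained aesthetics CNN.
    capture = cv2.VideoCapture(video_path)
    candidates = []
    while True:
        ok, frame = capture.read()
        if not ok:
            break
        if sharpness(frame) >= min_sharpness:  # stage 1: conventional filter
            candidates.append(frame)
    capture.release()
    scored = [(score_frame(f), f) for f in candidates]  # stage 2: CNN scoring
    scored.sort(key=lambda pair: pair[0], reverse=True)
    return [f for _, f in scored[:keep]]
\end{verbatim}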

If there is more time, I would also like to incorporate a machine learning editing system, which will attempt to enhance certain detected features automatically by editing the picture. For example, cropping to increase a match to the `rule of thirds', or blurring the background via edge detection to simulate a shallow depth of field. This would be an optional setting and the editing would be non-destructive.\\
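
Purely as a sketch of what such non-destructive enhancement could look like (the exact approach is still to be decided), the background-blur idea might be prototyped with OpenCV roughly as follows, where a dilated Canny edge map stands in for the subject region:

\begin{verbatim}
import cv2
import numpy as np

def blur_background(frame, low=100, high=200):
    # Edge detection gives a rough stand-in for the subject region.
    grey = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    edges = cv2.Canny(grey, low, high)
    # Grow the edge map into a coarse foreground mask.
    kernel = np.ones((15, 15), np.uint8)
    mask = cv2.dilate(edges, kernel, iterations=2) > 0
    mask = np.repeat(mask[:, :, None], 3, axis=2)
    # Blur everything, then keep the original pixels inside the mask.
    blurred = cv2.GaussianBlur(frame, (21, 21), 0)
    return np.where(mask, frame, blurred)  # the input frame is untouched
\end{verbatim}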

The final goal of the project is to provide a tool that minimises the manual process of selecting aesthetically significant pictures from a video. This could help improve automatic thumbnail selection for online videos, or create a photo album from a video as a different way to experience the same memory. Personally, I am very interested in using this technology as a way to aid people with certain motor and sensory conditions/disorders, who might find it difficult or impossible to take aesthetic pictures naturally.\\

%==============================================================================
\section{Proposed tasks}
%==============================================================================

\begin{itemize}

\item \textbf{Context-specific research into machine learning} - I will need to research CNNs further and learn how to build and fine-tune them; this should be done in tandem with learning a machine learning library. Work on this should start during the 3rd week.
\item \textbf{Set up tools for development} - I will need to set up tools to help me practise development methodologies. These will help me keep track of work and plan ahead. This should be finished by the start of the 3rd week.
\item \textbf{Learn machine learning libraries} - For the machine learning aspect of the project I will need to learn how to use machine learning libraries. I will use these to process a reduced set of the frames and determine how aesthetic each of them is. This should be done in tandem with researching more about machine learning and CNNs, and should start in the 3rd week.
\item \textbf{Create a CLI tool in Python} - As I am working with computer vision, I will be writing the program in Python, which will be the most important task. Work has already started on this, and it is expected to be finished in the last week of the project.
% \item \textbf{Create a GUI} - If there's time, I will produce a cross platform GUI to make the utility more accessible. At the moment I'm looking at utilising Flutter for this as it's cross-platform.
\item \textbf{Enhance aesthetic properties of selected photos} - If I have more time, I will also look at using the knowledge obtained through machine learning to improve the selected frames by enhancing their most significant aesthetic properties. This should only start once the original work has been completed and should be finished by the end of the project.

\end{itemize}

%==============================================================================
\section{Project deliverables}

\begin{itemize}
\item \textbf{CLI program written in Python} - A CLI Python program, with built-in help, that takes a video file as an input and outputs image files to a specified output folder (see the interface sketch after this list).
\item \textbf{User and Developer Manual} - A manual for users to help them understand how to use the program and tune the parameters to their preference. Another manual for developers will be written to help people extend or modify the program.
% \item \textbf{Cross-platform GUI} - A GUI will be created to make the program more accessible to users. This will be built using a cross-platform GUI library/framework. (Currently looking at using Flutter). The aim is to have this running on at least: Windows 10/11, Major Linux Distributions, MacOS (Latest).
\item \textbf{Final report} - A final report that discusses the initial problem, details the technical work achieved and its development process, and provides a critical evaluation of and insight into the project as a whole.
\end{itemize}
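
To make the intended command-line interface concrete, here is a minimal, provisional sketch (the program name and option names are assumptions at this stage, not final decisions):

\begin{verbatim}
import argparse

def parse_args():
    # Hypothetical entry point; the real options may differ.
    parser = argparse.ArgumentParser(
        prog="autophotographer",
        description="Select the most aesthetic frames from a video file.")
    parser.add_argument("video", help="path to the input video file")
    parser.add_argument("-o", "--output", default="output",
                        help="folder the selected frames are written to")
    parser.add_argument("-n", "--count", type=int, default=5,
                        help="number of frames to keep")
    return parser.parse_args()

if __name__ == "__main__":
    args = parse_args()
    print(args)  # the selection pipeline described above would run here
\end{verbatim}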

%==============================================================================

%
% Start to comment out / remove the following lines. They are only provided for instruction for this example template. You don't need the following section title, because it will be added as part of the bibliography section.
%
%==============================================================================
%\section*{Your Bibliography - REMOVE this title and text for final version}
%%==============================================================================
%%
%You need to include an annotated bibliography. This should list all relevant web pages, books, journals etc. that you have consulted in researching your project. Each reference should include an annotation.
%
%The purpose of the section is to understand what sources you are looking at. A correctly formatted list of items and annotations is sufficient. You might go further and make use of bibliographic tools, e.g. BibTeX in a LaTeX document could be used to provide citations, for example \cite{NumericalRecipes} \cite{MarksPaper} \cite[99-101]{FailBlog} \cite{kittenpic_ref}. The bibliographic tools are not a requirement, but you are welcome to use them.
%
%You can remove the above {\em Your Bibliography} section heading because it will be added in by the renewcommand which is part of the bibliography. The correct annotated bibliography information is provided below.
%
% End of comment out / remove the lines. They are only provided for instruction for this example template.
%

\nocite{*} % include everything from the bibliography, irrespective of whether it has been referenced.

% the following line is included so that the bibliography is also shown in the table of contents. There is the possibility that this is added to the previous page for the bibliography. To address this, a new page is started so that it appears on the first page for the bibliography.
\newpage
\addcontentsline{toc}{section}{Initial Annotated Bibliography}

%
% example of including an annotated bibliography. The style currently in use is the numeric IEEE one (IEEEannotU); to switch to the author-date style, use authordate2annot instead, modify the packages included at the top (see the notes earlier in the file), and then trash your aux files and re-run.
%\bibliographystyle{authordate2annot}
\bibliographystyle{IEEEannotU}
\renewcommand{\refname}{Annotated Bibliography} % if you put text into the final {} on this line, you will get an extra title, e.g. References. This isn't necessary for the outline project specification.
\bibliography{mmp} % References file

\end{document}
Image changed (before: 355 KiB, after: 355 KiB); preview not shown.
Image changed (before: 13 KiB, after: 13 KiB); preview not shown.