# -*- coding: utf-8 -*-
from markdoc.config import Config
Config.register_default('server.bind', '127.0.0.1')
Config.register_default('server.port', 8008)
Config.register_default('server.num-threads', 10)
Config.register_default('server.name', None)
Config.register_default('server.request-queue-size', 5)
Config.register_default('server.timeout', 10)
def server_maker(config, **extra_config):
"""
Return a server-making callable to create a CherryPy WSGI server.
The server-making callable should be passed a WSGI application, and it
will return an instance of `cherrypy.wsgiserver.CherryPyWSGIServer`.
    You can optionally override any of the default configuration
    parameters by passing in keyword arguments, which will be passed along
    to the `CherryPyWSGIServer` constructor.
"""
from cherrypy.wsgiserver import CherryPyWSGIServer
bind_addr = (config['server.bind'], config['server.port'])
kwargs = dict(
numthreads=config['server.num-threads'],
server_name=config['server.name'],
request_queue_size=config['server.request-queue-size'],
timeout=config['server.timeout'])
kwargs.update(extra_config)
return lambda wsgi_app: CherryPyWSGIServer(bind_addr, wsgi_app, **kwargs)
Config.server_maker = server_maker
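# A minimal usage sketch (illustrative, not part of the original module):
# build a server factory from a wiki's config and serve a WSGI app. The
# wiki path and the `app` object here are assumptions.
#
#     from markdoc.config import Config
#     config = Config.for_directory('/path/to/wiki')
#     make_server = config.server_maker(numthreads=4)  # override a default
#     server = make_server(app)  # `app` is any WSGI application
#     try:
#         server.start()
#     except KeyboardInterrupt:
#         server.stop()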
# -*- coding: utf-8 -*-
import os.path as p
from markdoc.config import Config
def html_dir(config):
return p.abspath(p.join(config['meta.root'],
config.get('html-dir', config['hide-prefix'] + 'html')))
def static_dir(config):
return p.abspath(p.join(config['meta.root'], config.get('static-dir', 'static')))
def wiki_dir(config):
return p.abspath(p.join(config['meta.root'], config.get('wiki-dir', 'wiki')))
def temp_dir(config):
return p.abspath(p.join(config['meta.root'],
config.get('temp-dir', config['hide-prefix'] + 'tmp')))
def template_dir(config):
return p.abspath(p.join(config['meta.root'],
config.get('template-dir', config['hide-prefix'] + 'templates')))
Config.register_default('hide-prefix', '.')
Config.register_default('use-default-static', True)
Config.register_default('cvs-exclude', True)
Config.register_func_default('html-dir', lambda cfg, key: html_dir(cfg))
Config.register_func_default('static-dir', lambda cfg, key: static_dir(cfg))
Config.register_func_default('wiki-dir', lambda cfg, key: wiki_dir(cfg))
Config.register_func_default('temp-dir', lambda cfg, key: temp_dir(cfg))
Config.register_func_default('template-dir', lambda cfg, key: template_dir(cfg))
Config.html_dir = property(html_dir)
Config.static_dir = property(static_dir)
Config.wiki_dir = property(wiki_dir)
Config.temp_dir = property(temp_dir)
Config.template_dir = property(template_dir)
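# Illustrative example of how these defaults resolve: given a wiki rooted at
# '/path/to/wiki' and the default hide-prefix of '.', the derived paths are:
#
#     config.wiki_dir      -> '/path/to/wiki/wiki'
#     config.static_dir    -> '/path/to/wiki/static'
#     config.html_dir      -> '/path/to/wiki/.html'
#     config.temp_dir      -> '/path/to/wiki/.tmp'
#     config.template_dir  -> '/path/to/wiki/.templates'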
# -*- coding: utf-8 -*-
import os
import os.path as p
import operator
import re
from markdoc.cache import DocumentCache, RenderCache, read_from
from markdoc.config import Config
from markdoc.render import make_relative
Config.register_default('listing-filename', '_list.html')
class Builder(object):
"""An object to handle all the parts of the wiki building process."""
def __init__(self, config):
self.config = config
self.doc_cache = DocumentCache(base=self.config.wiki_dir)
        def render_func(path, doc):
            return self.config.markdown(curr_path=path).convert(doc)
self.render_cache = RenderCache(render_func, self.doc_cache)
render_doc_func = lambda path, doc: self.render_document(path, cache=False)
self.document_render_cache = RenderCache(render_doc_func, self.render_cache)
def crumbs(self, path):
"""
Produce a breadcrumbs list for the given filename.
The crumbs are calculated based on the wiki root and the absolute path
to the current file.
Examples
--------
Assuming a wiki root of `/a/b/c`:
        * `/a/b/c/wiki/index.md` => `[('index', None)]`
        * `/a/b/c/wiki/subdir/index.md` =>
          `[('index', '/'), ('subdir', None)]`
        * `/a/b/c/wiki/subdir/file.md` =>
          `[('index', '/'), ('subdir', '/subdir/'), ('file', None)]`
        """
if p.isabs(path):
path = self.doc_cache.relative(path)
rel_components = path.split(p.sep)
terminus = p.splitext(rel_components.pop())[0]
if not rel_components:
if terminus == 'index':
return [('index', None)]
return [('index', '/'), (terminus, None)]
elif terminus == 'index':
terminus = p.splitext(rel_components.pop())[0]
crumbs = [('index', '/')]
for component in rel_components:
path = '%s%s/' % (crumbs[-1][1], component)
crumbs.append((component, path))
crumbs.append((terminus, None))
return crumbs
def walk(self):
"""
Walk through the wiki, yielding info for each document.
        For each document encountered, its filename (relative to the wiki
        root) will be yielded.
"""
if not self.config['document-extensions']:
self.config['document-extensions'].append('')
def valid_extension(filename):
return any(filename.endswith(valid_ext)
for valid_ext in self.config['document-extensions'])
for dirpath, subdirs, files in os.walk(self.config.wiki_dir):
            remove_hidden(subdirs)
            subdirs.sort()
            remove_hidden(files)
            files.sort()
for filename in filter(valid_extension, files):
full_filename = p.join(dirpath, filename)
yield p.relpath(full_filename, start=self.config.wiki_dir)
def listing_context(self, directory):
"""
Generate the template context for a directory listing.
This method accepts a relative path, with the base assumed to be the
HTML root. This means listings must be generated after the wiki is
built, allowing them to list static media too.
Directories should always be '/'-delimited when specified, since it is
assumed that they are URL paths, not filesystem paths.
For information on what the produced context will look like, consult the
`listing` doctest.
"""
        # Strip leading and trailing slashes from the directory name.
        directory = directory.strip('/')
# Resolve to filesystem paths.
fs_rel_dir = p.sep.join(directory.split('/'))
fs_abs_dir = p.join(self.config.html_dir, fs_rel_dir)
skip_files = set([self.config['listing-filename'], 'index.html'])
sub_directories, pages, files = [], [], []
for basename in os.listdir(fs_abs_dir):
fs_abs_path = p.join(fs_abs_dir, basename)
file_dict = {
'basename': basename,
'href': directory + '/' + basename}
if not file_dict['href'].startswith('/'):
file_dict['href'] = '/' + file_dict['href']
if p.isdir(fs_abs_path):
file_dict['href'] += '/'
sub_directories.append(file_dict)
else:
if (basename in skip_files or basename.startswith('.') or
basename.startswith('_')):
continue
file_dict['slug'] = p.splitext(basename)[0]
file_dict['size'] = p.getsize(fs_abs_path)
file_dict['humansize'] = humansize(file_dict['size'])
if p.splitext(basename)[1] == (p.extsep + 'html'):
# Get the title from the file.
contents = read_from(fs_abs_path)
file_dict['title'] = get_title(file_dict['slug'], contents)
# Remove .html from the end of the href.
file_dict['href'] = p.splitext(file_dict['href'])[0]
pages.append(file_dict)
else:
files.append(file_dict)
sub_directories.sort(key=lambda directory: directory['basename'])
pages.sort(key=lambda page: page['title'])
files.sort(key=lambda file_: file_['basename'])
return {
'directory': directory,
'sub_directories': sub_directories,
'pages': pages,
'files': files,
'make_relative': lambda href: make_relative(directory, href),
}
def render(self, path, cache=True):
return self.render_cache.render(path, cache=cache)
def title(self, path, cache=True):
return get_title(path, self.render(path, cache=cache))
def render_document(self, path, cache=True):
if cache:
return self.document_render_cache.render(path)
context = {}
context['content'] = self.render(path)
context['title'] = self.title(path)
context['crumbs'] = self.crumbs(path)
context['make_relative'] = lambda href: make_relative(path, href)
template = self.config.template_env.get_template('document.html')
return template.render(context)
def render_listing(self, path):
import jinja2
context = self.listing_context(path)
crumbs = [('index', '/')]
if path not in ['', '/']:
current_dir = ''
for component in path.strip('/').split('/'):
crumbs.append((component, '%s/%s/' % (current_dir, component)))
current_dir += '/' + component
crumbs.append((jinja2.Markup('<span class="list-crumb">list</span>'), None))
context['crumbs'] = crumbs
context['make_relative'] = lambda href: make_relative(path + '/', href)
template = self.config.template_env.get_template('listing.html')
return template.render(context)
def remove_hidden(names):
"""Remove (in-place) all strings starting with a '.' in the given list."""
i = 0
while i < len(names):
if names[i].startswith('.'):
names.pop(i)
else:
i += 1
return names
def get_title(filename, data):
"""Try to retrieve a title from a filename and its contents."""
match = re.search(r'<!-- ?title:(.+)-->', data, re.IGNORECASE)
if match:
return match.group(1).strip()
match = re.search(r'<h1[^>]*>([^<]+)</h1>', data, re.IGNORECASE)
if match:
return match.group(1)
name, extension = p.splitext(p.basename(filename))
return re.sub(r'[-_]+', ' ', name).title()
def humansize(size, base=1024):
import decimal
import math
if size == 0:
return '0B'
i = int(math.log(size, base))
prefix = 'BKMGTPEZY'[i]
number = decimal.Decimal(size) / (base ** i)
return str(number.to_integral()) + prefix
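# Doctest-style sanity checks for the helpers above (illustrative):
#
#     >>> humansize(0)
#     '0B'
#     >>> humansize(1024)
#     '1K'
#     >>> get_title('my-page.md', '<h1>My Page</h1>')
#     'My Page'
#     >>> get_title('my-page.md', '')
#     'My Page'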
# -*- coding: utf-8 -*-
import os.path as p
import jinja2
import markdoc
from markdoc.config import Config
Config.register_default('use-default-templates', True)
def build_template_env(config):
"""Build a Jinja2 template environment for a given config."""
load_path = []
if p.isdir(config.template_dir):
load_path.append(config.template_dir)
if config['use-default-templates']:
load_path.append(markdoc.default_template_dir)
environment = jinja2.Environment(loader=jinja2.FileSystemLoader(load_path))
environment.globals['config'] = config
return environment
def template_env(config):
if not getattr(config, '_template_env', None):
config._template_env = build_template_env(config)
return config._template_env
Config.template_env = property(template_env)
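# Usage sketch (assumes a valid `config` instance): the environment is built
# lazily and memoized on the config, so repeated access is cheap. The context
# below mirrors what `Builder.render_document` passes in.
#
#     from markdoc.render import make_relative
#     template = config.template_env.get_template('document.html')
#     html = template.render({
#         'title': 'Home',
#         'content': '<p>Hello</p>',
#         'crumbs': [('index', None)],
#         'make_relative': lambda href: make_relative('/', href),
#     })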
# -*- coding: utf-8 -*-
import codecs
from functools import wraps
import os
import os.path as p
import time
class DocumentCache(object):
"""
A high-level document cache for caching the content of files.
This is a read-only cache which uses the OS-reported modification timestamps
for files (via `os.stat()`) to determine cache dirtiness, and then refreshes
its cache behind the scenes when files are requested.
You can access values via `.get()` (which supports several options) or via
simple subscription syntax (i.e. `cache[path]`). The cache is configured
with a 'root' on instantiation, by which all relative paths are resolved.
"""
def __init__(self, base=None, cache=None, encoding='utf-8'):
if cache is None:
cache = {}
self.cache = cache
if base is None:
base = os.getcwd()
self.base = base
self.encoding = encoding
absolute = lambda self, relpath: p.join(self.base, relpath)
relative = lambda self, abspath: p.relpath(abspath, start=self.base)
def has_latest_version(self, path):
"""Determine whether the cache for a path is up to date."""
        # `absolute()` is a no-op for paths which are already absolute.
path = self.absolute(path)
if path not in self.cache:
return False
cached_mtime = self.cache[path][0]
return os.stat(path).st_mtime <= cached_mtime
def refresh_cache(self, path, encoding=None):
"""Refresh the cache, no matter what, with an optional encoding."""
path = self.absolute(path)
encoding = encoding or self.encoding
data = read_from(path, encoding=encoding)
mtime = os.stat(path).st_mtime
self.cache[path] = (mtime, data)
def update_to_latest_version(self, path):
"""If necessary, refresh the cache's copy of a file."""
if not self.has_latest_version(path):
self.refresh_cache(path)
def get(self, path, cache=True, encoding=None):
"""Retrieve the data for a given path, optionally using the cache."""
path = self.absolute(path)
if cache:
self.update_to_latest_version(path)
return self.cache[path][1] # (mtime, data)[1]
if not p.isfile(path):
return None
if encoding is None:
encoding = self.encoding
return read_from(path, encoding=encoding)
def __getitem__(self, path):
result = self.get(path)
if result is None:
raise KeyError(path)
return result
class RenderCache(object):
def __init__(self, render_func, document_cache):
self.render_func = render_func
self.doc_cache = document_cache
# The two-cache structure allows us to garbage collect rendered results
# for old versions of documents.
# pathname => document hash
self.hash_cache = {}
# document hash => rendered results
self.result_cache = {}
def render(self, path, cache=True):
"""Render the contents of a filename, optionally using the cache."""
document = self.doc_cache.get(path, cache=cache)
if cache:
doc_hash = (hash(path), hash(document))
if path in self.hash_cache and self.hash_cache[path] != doc_hash:
self.result_cache.pop(self.hash_cache[path], None)
self.hash_cache[path] = doc_hash
if doc_hash not in self.result_cache:
self.result_cache[doc_hash] = self.render_func(path, document)
return self.result_cache[doc_hash]
else:
            return self.render_func(path, document)
get = render # For compatibility with the document cache.
def read_from(filename, encoding='utf-8'):
"""Read data from a filename, optionally with an encoding."""
if encoding is None:
fp = open(filename)
else:
fp = codecs.open(filename, encoding=encoding)
try:
return fp.read()
finally:
fp.close()
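# A short sketch of the two caches working together (paths are assumptions):
#
#     docs = DocumentCache(base='/path/to/wiki/wiki')
#     rendered = RenderCache(lambda path, doc: doc.upper(), docs)
#     rendered.render('index.md')  # reads the file, renders and caches
#     rendered.render('index.md')  # served from the result cache
#
# Because results are keyed on a hash of the path and content, a re-render
# only happens when a document actually changes.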
# -*- coding: utf-8 -*-
import logging
import os
import os.path as p
__version__ = '0.6.6'
static_dir = p.join(p.dirname(__file__), 'static')
default_static_dir = p.join(static_dir, 'default-static')
default_template_dir = p.join(static_dir, 'default-templates')
if not hasattr(p, 'relpath'):
def relpath(path, start=p.curdir):
"""Return a relative version of a path"""
if not path:
raise ValueError("no path specified")
start_list = p.abspath(start).split(p.sep)
path_list = p.abspath(path).split(p.sep)
# Work out how much of the filepath is shared by start and path.
i = len(p.commonprefix([start_list, path_list]))
rel_list = [p.pardir] * (len(start_list)-i) + path_list[i:]
if not rel_list:
return p.curdir
return p.join(*rel_list)
p.relpath = relpath
default_formatter = logging.Formatter(
u'%(name)s: %(levelname)s: %(message)s')
console_handler = logging.StreamHandler() # By default, outputs to stderr.
console_handler.setFormatter(default_formatter)
console_handler.setLevel(logging.DEBUG)
logging.getLogger('markdoc').addHandler(console_handler)
logging.getLogger('markdoc').setLevel(logging.INFO) # Default level.
# These modules all initialize various default config values, so need to be
# imported straight away.
import markdoc.builder
import markdoc.directories
import markdoc.render
import markdoc.server
import markdoc.templates
# -*- coding: utf-8 -*-
import logging
import mimetypes
import os.path as p
import webob
from markdoc.render import make_relative
if not mimetypes.inited:
mimetypes.init()
# Assume all HTML files are XHTML.
mimetypes.types_map['.html'] = mimetypes.types_map['.xhtml']
class MarkdocWSGIApplication(object):
"""
A WSGI application which will serve up a Markdoc wiki.
    Note that this application is not limited to serving Markdoc wikis, but
    it was designed especially for them. The handling of requests is simple,
    and is based on the request path:
/[a/b/.../c/]filename
* If the file exists relative to the docroot, serve it; else
* If the filename with the extension 'html' exists relative to the
docroot, serve it; else
* If a directory exists with that name, return a redirect to it (with a
trailing slash); else
    * Return an HTTP 404 ‘Not Found’.
/[a/b/.../c/]directory/ (including the index, '/')
* If the directory exists, look for an 'index.html' file inside it, and
serve it if it exists; else
* If a file of the same name exists in the parent directory, return a
redirect to it (without the trailing slash); else
    * Return an HTTP 404 ‘Not Found’.
In the context of Markdoc, if a directory does not contain an 'index.md'
file, a listing will be generated and saved as the 'index.html' file for
that directory.
"""
def __init__(self, config):
self.config = config
self.log = logging.getLogger('markdoc.wsgi')
def __call__(self, environ, start_response):
request = webob.Request(environ)
response = self.get_response(request)
self.log.info('%s %s - %d' % (request.method, request.path_info, response.status_int))
return response(environ, start_response)
def is_safe(self, directory):
"""Make sure the given absolute path does not point above the htroot."""
return p.pardir not in p.relpath(directory, start=self.config.html_dir).split(p.sep)
def get_response(self, request):
if request.path_info.endswith('/'):
return self.directory(request)
return self.file(request)
def directory(self, request):
"""
Serve a request which points to a directory.
* If the directory exists, look for an 'index.html' file inside it, and
serve it if it exists; else
* If a file of the same name exists in the parent directory, return a
redirect to it (without the trailing slash); else
* If a file of the same name with a 'html' extension exists in the
parent directory, redirect to it (without the trailing slash); else
        * Return an HTTP 404 ‘Not Found’.
"""
path_parts = request.path_info.strip('/').split('/')
index_filename = p.join(self.config.html_dir, *(path_parts + ['index.html']))
if p.exists(index_filename) and self.is_safe(index_filename):
return serve_file(index_filename)
directory_filename = p.join(self.config.html_dir, *path_parts)
if p.isfile(directory_filename) or p.isfile(directory_filename + p.extsep + 'html'):
return temp_redirect(request.path_info.rstrip('/'))
return self.not_found(request)
def file(self, request):
"""
Serve a request which points to a file.
* If the file exists relative to the docroot, serve it; else
* If the filename with the extension 'html' exists relative to the
docroot, serve it; else
* If a directory exists with that name, return a redirect to it (with a
trailing slash); else
        * Return an HTTP 404 ‘Not Found’.
"""
path_parts = request.path_info.strip('/').split('/')
filename = p.abspath(p.join(self.config.html_dir, *path_parts))
if not self.is_safe(filename):
return self.forbidden(request)
if p.isfile(filename):
pass
elif p.isfile(filename + p.extsep + 'html'):
filename = filename + p.extsep + 'html'
else:
if p.isdir(filename):
return temp_redirect(request.path_info + '/')
return self.not_found(request)
return serve_file(filename)
def error(self, request, status):
"""
Serve a page for a given HTTP error.
This works by rendering a template based on the HTTP error code; so an
error of '404 Not Found' will render the '404.html' template. The
context passed to the template is as follows:
`request`
: The `webob.Request` object for this HTTP request.
`is_index`
: A boolean indicating whether or not this is the index page. This may
be useful in error pages where you want to link back to the home page;
such a link will be useless in the index.
`status`
: An integer representing the HTTP status code of this error.
`reason`
: A string of the HTTP status 'reason', such as 'Not Found' for 404.
The template is assumed to be valid XHTML.
Note that the templating machinery is only invoked when the browser is
expecting HTML. This is determined by calling
`request.accept.accept_html()`. If not, an empty response (i.e. one
without a content body) is returned.
"""
response = webob.Response()
response.status = status
if request.accept.accept_html():
context = {}
context['request'] = request
context['is_index'] = request.path_info in ['/', '/index.html']
context['make_relative'] = lambda href: make_relative(request.path_info, href)
context['status'] = status
context['reason'] = webob.util.status_reasons[status]
template = self.config.template_env.get_template('%d.html' % status)
response.unicode_body = template.render(context)
response.content_type = mimetypes.types_map['.xhtml']
else:
del response.content_length
del response.content_type
return response
forbidden = lambda self, request: self.error(request, 403)
not_found = lambda self, request: self.error(request, 404)
def redirect(location, permanent=False):
"""Issue an optionally-permanent redirect to another location."""
response = webob.Response()
response.status = 301 if permanent else 302
response.location = location
del response.content_type
del response.content_length
return response
temp_redirect = lambda location: redirect(location, permanent=False)
perm_redirect = lambda location: redirect(location, permanent=True)
def serve_file(filename, content_type=None, chunk_size=4096):
"""
Serve the specified file as a chunked response.
Return a `webob.Response` instance which will serve up the file in chunks,
as specified by the `chunk_size` parameter (default 4KB).
You can also specify a content type with the `content_type` keyword
argument. If you do not, the content type will be inferred from the
filename; so 'index.html' will be interpreted as 'application/xhtml+xml',
'file.mp3' as 'audio/mpeg', et cetera. If none can be guessed, the content
type will be reported as 'application/octet-stream'.
"""
if content_type is None:
content_type = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
if content_type.startswith('text/html'):
content_type = content_type.replace('text/html', 'application/xhtml+xml')
    def chunked_read():
fp = open(filename, 'rb')
try:
data = fp.read(chunk_size)
while data:
yield data
data = fp.read(chunk_size)
finally:
fp.close()
response = webob.Response(content_type=content_type)
response.app_iter = chunked_read()
response.content_length = p.getsize(filename)
return response
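# Minimal sketch (an assumption, not part of the module): the application can
# be mounted under any WSGI server, e.g. the standard library's wsgiref for
# quick local testing:
#
#     from wsgiref.simple_server import make_server
#     from markdoc.config import Config
#     config = Config.for_directory('/path/to/wiki')
#     httpd = make_server('127.0.0.1', 8008, MarkdocWSGIApplication(config))
#     httpd.serve_forever()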
# -*- coding: utf-8 -*-
"""Utilities for working with Markdoc configurations."""
import copy
import os
import os.path as p
import yaml
import markdoc.exc
class ConfigNotFound(markdoc.exc.AbortError):
"""The configuration file was not found."""
pass
class ConfigMeta(type):
def __new__(mcls, name, bases, attrs):
cls = type.__new__(mcls, name, bases, attrs)
cls._defaults = {}
cls._func_defaults = {}
return cls
def register_default(cls, key, default_value):
"""Register a default value for a given key."""
cls._defaults[key] = default_value
def register_func_default(cls, key, function):
"""Register a callable as a functional default for a key."""
cls._func_defaults[key] = function
def func_default_for(cls, key):
"""Decorator to define a functional default for a given key."""
        def decorator(function):
            cls.register_func_default(key, function)
            return function
        return decorator
class Config(dict):
"""
A dictionary which represents a single wiki's Markdoc configuration.
When instantiating this dictionary, if you aren't using an actual
configuration file, just remember to set `config['meta.root']` to the
wiki root; you can use `None` as the value for config_file. For example:
# With a filename:
config = Config('filename.yaml', {...})
# Without a filename:
config = Config(None, {'meta': {'root': '/path/to/wiki/root/'}, ...})
"""
__metaclass__ = ConfigMeta
def __init__(self, config_file, config):
super(Config, self).__init__(flatten(config))
        self['meta.config-file'] = config_file
        if config_file is not None:
            self['meta.root'] = p.dirname(config_file)
def __getitem__(self, key):
try:
return dict.__getitem__(self, key)
except KeyError:
if key in self._defaults:
self[key] = copy.copy(self._defaults[key])
elif key in self._func_defaults:
self[key] = self._func_defaults[key](self, key)
else:
raise
return dict.__getitem__(self, key)
def __delitem__(self, key):
if (key not in self):
return # fail silently.
return dict.__delitem__(self, key)
@classmethod
def for_directory(cls, directory=None):
"""
Get the configuration from the 'markdoc.yaml' file in a directory.
If you do not specify a directory, this method will use the current
working directory.
"""
if directory is None:
directory = os.getcwd()
if p.exists(p.join(directory, 'markdoc.yaml')):
return cls.for_file(p.join(directory, 'markdoc.yaml'))
elif p.exists(p.join(directory, '.markdoc.yaml')):
return cls.for_file(p.join(directory, '.markdoc.yaml'))
raise ConfigNotFound("A markdoc configuration could not be found.")
@classmethod
def for_file(cls, filename):
"""Get the configuration from a given YAML file."""
if not p.exists(filename):
relpath = p.relpath(p.dirname(filename), start=os.getcwd())
basename = p.basename(filename)
if relpath == '.':
raise ConfigNotFound("%s was not found in the current directory" % basename)
raise ConfigNotFound("%s was not found in %s" % (basename, relpath))
fp = open(filename)
try:
config = yaml.load(fp) or {}
finally:
fp.close()
return cls(filename, config)
def flatten(dictionary, prefix=''):
"""
Flatten nested dictionaries into dotted keys.
>>> d = {
... 'a': {
... 'b': 1,
... 'c': {
... 'd': 2,
... 'e': {
... 'f': 3
... }
... }
... },
... 'g': 4,
... }
>>> sorted(flatten(d).items())
[('a.b', 1), ('a.c.d', 2), ('a.c.e.f', 3), ('g', 4)]
"""
for key in dictionary.keys():
value = dictionary.pop(key)
if not isinstance(value, dict):
dictionary[prefix + key] = value
else:
for key2 in value.keys():
value2 = value.pop(key2)
if not isinstance(value2, dict):
dictionary[prefix + key + '.' + key2] = value2
else:
dictionary.update(flatten(value2,
prefix=(prefix + key + '.' + key2 + '.')))
return dictionary
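# Illustrative (hypothetical key names): how other modules register defaults
# against this class, including the decorator form.
#
#     Config.register_default('example.simple-key', 42)
#
#     @Config.func_default_for('example.derived-key')
#     def derived(config, key):
#         return config['example.simple-key'] * 2
#
# Looking up either key on a Config that doesn't define it will lazily fill
# in (and memoize) the default value.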
# -*- coding: utf-8 -*-
import os.path as p
from markdoc.config import Config
import markdown
Config.register_default('markdown.extensions', ())
Config.register_func_default('markdown.extension-configs', lambda cfg, key: {})
Config.register_default('markdown.safe-mode', False)
Config.register_default('markdown.output-format', 'xhtml1')
Config.register_default('document-extensions',
frozenset(['.md', '.mdown', '.markdown', '.wiki', '.text']))
class RelativeLinksTreeProcessor(markdown.treeprocessors.Treeprocessor):
"""A Markdown tree processor to relativize wiki links."""
def __init__(self, curr_path='/'):
self.curr_path = curr_path
def make_relative(self, href):
return make_relative(self.curr_path, href)
def run(self, tree):
links = tree.getiterator('a')
for link in links:
if link.attrib['href'].startswith('/'):
link.attrib['href'] = self.make_relative(link.attrib['href'])
return tree
def make_relative(curr_path, href):
"""Given a current path and a href, return an equivalent relative path."""
curr_list = curr_path.lstrip('/').split('/')
href_list = href.lstrip('/').split('/')
# How many path components are shared between the two paths?
i = len(p.commonprefix([curr_list, href_list]))
rel_list = (['..'] * (len(curr_list) - i - 1)) + href_list[i:]
if not rel_list or rel_list == ['']:
return './'
return '/'.join(rel_list)
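# Doctest-style examples (illustrative):
#
#     >>> make_relative('/a/b/c', '/a/d')
#     '../d'
#     >>> make_relative('/a/b/', '/a/b/c')
#     'c'
#     >>> make_relative('/index', '/')
#     './'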
def unflatten_extension_configs(config):
"""Unflatten the markdown extension configs from the config dictionary."""
configs = config['markdown.extension-configs']
for key, value in config.iteritems():
if not key.startswith('markdown.extension-configs.'):
continue
parts = key[len('markdown.extension-configs.'):].split('.')
extension_config = configs
for part in parts[:-1]:
extension_config = extension_config.setdefault(part, {})
extension_config[parts[-1]] = value
return configs
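# For example (hypothetical key): a flattened entry such as
# 'markdown.extension-configs.codehilite.css_class' = 'highlight' is rebuilt
# here into {'codehilite': {'css_class': 'highlight'}} before being handed to
# `markdown.Markdown` as `extension_configs`.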
def get_markdown_instance(config, curr_path='/', **extra_config):
"""Return a `markdown.Markdown` instance for a given configuration."""
mdconfig = dict(
extensions=config['markdown.extensions'],
extension_configs=unflatten_extension_configs(config),
safe_mode=config['markdown.safe-mode'],
output_format=config['markdown.output-format'])
mdconfig.update(extra_config) # Include any extra kwargs.
md_instance = markdown.Markdown(**mdconfig)
md_instance.treeprocessors['relative_links'] = RelativeLinksTreeProcessor(curr_path=curr_path)
return md_instance
# Add it as a method to `markdoc.config.Config`.
Config.markdown = get_markdown_instance
# -*- coding: utf-8 -*-
class MarkdocError(Exception):
"""An error occurred whilst running the markdoc utility."""
pass
class AbortError(MarkdocError):
"""An exception occurred which should cause Markdoc to abort."""
pass
.codehilite { background: #ffffff; }
.codehilite .c { color: #999988; font-style: italic } /* Comment */
.codehilite .err { color: #a61717; background-color: #e3d2d2 } /* Error */
.codehilite .k { font-weight: bold } /* Keyword */
.codehilite .o { font-weight: bold } /* Operator */
.codehilite .cm { color: #999988; font-style: italic } /* Comment.Multiline */
.codehilite .cp { color: #999999; font-weight: bold } /* Comment.Preproc */
.codehilite .c1 { color: #999988; font-style: italic } /* Comment.Single */
.codehilite .cs { color: #999999; font-weight: bold; font-style: italic } /* Comment.Special */
.codehilite .gd { color: #000000; background-color: #ffdddd } /* Generic.Deleted */
.codehilite .gd .x { color: #000000; background-color: #ffaaaa } /* Generic.Deleted.Specific */
.codehilite .ge { font-style: italic } /* Generic.Emph */
.codehilite .gr { color: #aa0000 } /* Generic.Error */
.codehilite .gh { color: #999999 } /* Generic.Heading */
.codehilite .gi { color: #000000; background-color: #ddffdd } /* Generic.Inserted */
.codehilite .gi .x { color: #000000; background-color: #aaffaa } /* Generic.Inserted.Specific */
.codehilite .go { color: #888888 } /* Generic.Output */
.codehilite .gp { color: #555555 } /* Generic.Prompt */
.codehilite .gs { font-weight: bold } /* Generic.Strong */
.codehilite .gu { color: #aaaaaa } /* Generic.Subheading */
.codehilite .gt { color: #aa0000 } /* Generic.Traceback */
.codehilite .kc { font-weight: bold } /* Keyword.Constant */
.codehilite .kd { font-weight: bold } /* Keyword.Declaration */
.codehilite .kp { font-weight: bold } /* Keyword.Pseudo */
.codehilite .kr { font-weight: bold } /* Keyword.Reserved */
.codehilite .kt { color: #445588; font-weight: bold } /* Keyword.Type */
.codehilite .m { color: #009999 } /* Literal.Number */
.codehilite .s { color: #d14 } /* Literal.String */
.codehilite .na { color: #008080 } /* Name.Attribute */
.codehilite .nb { color: #0086B3 } /* Name.Builtin */
.codehilite .nc { color: #445588; font-weight: bold } /* Name.Class */
.codehilite .no { color: #008080 } /* Name.Constant */
.codehilite .ni { color: #800080 } /* Name.Entity */
.codehilite .ne { color: #990000; font-weight: bold } /* Name.Exception */
.codehilite .nf { color: #990000; font-weight: bold } /* Name.Function */
.codehilite .nn { color: #555555 } /* Name.Namespace */
.codehilite .nt { color: #000080 } /* Name.Tag */
.codehilite .nv { color: #008080 } /* Name.Variable */
.codehilite .ow { font-weight: bold } /* Operator.Word */
.codehilite .w { color: #bbbbbb } /* Text.Whitespace */
.codehilite .mf { color: #009999 } /* Literal.Number.Float */
.codehilite .mh { color: #009999 } /* Literal.Number.Hex */
.codehilite .mi { color: #009999 } /* Literal.Number.Integer */
.codehilite .mo { color: #009999 } /* Literal.Number.Oct */
.codehilite .sb { color: #d14 } /* Literal.String.Backtick */
.codehilite .sc { color: #d14 } /* Literal.String.Char */
.codehilite .sd { color: #d14 } /* Literal.String.Doc */
.codehilite .s2 { color: #d14 } /* Literal.String.Double */
.codehilite .se { color: #d14 } /* Literal.String.Escape */
.codehilite .sh { color: #d14 } /* Literal.String.Heredoc */
.codehilite .si { color: #d14 } /* Literal.String.Interpol */
.codehilite .sx { color: #d14 } /* Literal.String.Other */
.codehilite .sr { color: #009926 } /* Literal.String.Regex */
.codehilite .s1 { color: #d14 } /* Literal.String.Single */
.codehilite .ss { color: #990073 } /* Literal.String.Symbol */
.codehilite .bp { color: #999999 } /* Name.Builtin.Pseudo */
.codehilite .vc { color: #008080 } /* Name.Variable.Class */
.codehilite .vg { color: #008080 } /* Name.Variable.Global */
.codehilite .vi { color: #008080 } /* Name.Variable.Instance */
.codehilite .il { color: #009999 } /* Literal.Number.Integer.Long */
html {
background-color: #f6f6f6; }
body {
background-color: white;
margin: 0 auto;
width: 650px; }
body #breadcrumbs, body #content, body #footer {
background-color: white;
clear: both;
float: left;
overflow: hidden;
padding: 0 20px;
width: 610px; }
body #breadcrumbs {
border-bottom: 2px solid #f6f6f6;
height: 28px;
margin: 0;
padding: 0;
width: 650px; }
body #breadcrumbs li {
float: left;
list-style: none;
margin: 0;
padding: 0; }
body #breadcrumbs li a {
display: block;
float: left;
padding: 0 8px; }
body #breadcrumbs li.last {
padding: 0 8px; }
body #breadcrumbs li.not-last:after {
content: "»";
float: right; }
body #footer {
border-top: 8px solid #f6f6f6;
padding-top: 13px; }
body .clear {
clear: both;
border-width: 0;
margin: 0;
visibility: hidden; }
body.listing table#pages tr, body.listing table#subdirs tr, body.listing table#files tr {
border-bottom: 1px solid #777;
border-top: 1px solid #777; }
body.listing table#pages td, body.listing table#subdirs td, body.listing table#files td {
border: none; }
body.listing table#pages td.size, body.listing table#subdirs td.size, body.listing table#files td.size {
background-color: #f6f6f6; }
body.listing table#pages td.name, body.listing table#subdirs td.name, body.listing table#files td.name {
padding: 0; }
body.listing table#pages td.name a, body.listing table#subdirs td.name a, body.listing table#files td.name a {
display: block;
margin: 0;
padding: 4px 8px; }
blockquote {
background-color: #f6f6f6;
padding: 13px;
padding-bottom: 1px; }
hr {
border-style: solid;
border: none;
border-top: 1px solid #777;
margin: 28px 0; }
dl {
margin-left: 0; }
dl dd {
margin-bottom: 13px;
margin-left: 13px; }
ul {
margin-top: 0; }
ul li {
list-style: square outside; }
ul ul {
margin-bottom: 0; }
pre {
border-left: 1px solid gray;
margin-bottom: 13px;
margin-left: 30px;
padding-left: 12px; }
.codehilite {
border-left: 1px solid gray;
margin-bottom: 13px;
margin-left: 30px;
padding-left: 12px; }
.codehilite pre {
border: none;
margin: 0;
padding: 0; }
.codehilitetable {
margin-left: 0;
padding-left: 0; }
.codehilitetable tr td {
border: none;
padding: 3px 5px 0 5px; }
.codehilitetable tr td.linenos {
background-color: #f6f6f6;
border-right: 1px solid gray;
margin: 0;
padding-right: 6px;
text-align: right;
width: 19px; }
.codehilitetable tr td.linenos .linenodiv pre {
border: none;
margin: 0;
padding: 0; }
.codehilitetable tr td.code {
margin: 0;
padding-left: 12px; }
.codehilitetable tr td.code .codehilite {
border: none;
margin: 0;
padding: 0; }
body {
font-family: 'Helvetica Neue', Helvetica, Arial, Geneva, sans-serif;
line-height: 21px; }
body #breadcrumbs li {
color: #aaa;
font-size: 13px;
font-weight: bold;
line-height: 28px; }
body #breadcrumbs li a {
text-decoration: none; }
body #breadcrumbs li .list-crumb {
font-weight: normal; }
body #footer {
color: #777;
font-size: 13px;
text-transform: lowercase; }
body.listing table#pages td.size, body.listing table#subdirs td.size {
font-family: Menlo, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', Courier, 'Courier 10 Pitch', 'Courier New', monospace;
text-align: right; }
body.listing table#subdirs td.name {
font-family: Courier, 'Courier 10 Pitch', 'Courier New', monospace; }
h1, h2, h3, h4, h5, h6 {
line-height: 21px; }
h1 {
font-size: 21px; }
h2 {
font-size: 18px; }
h3 {
font-size: 15px; }
h4, h5, h6 {
font-size: 13px; }
a {
color: #990000;
text-decoration: none; }
a:hover {
color: #4c0000; }
a[href^="http:"] {
text-decoration: underline; }
dl dt {
font-weight: bold; }
code {
font-family: Courier, 'Courier 10 Pitch', 'Courier New', monospace;
line-height: 18px; }
pre {
font-family: Menlo, 'DejaVu Sans Mono', 'Bitstream Vera Sans Mono', Courier, 'Courier 10 Pitch', 'Courier New', monospace;
font-size: 11px;
line-height: 18px; }
html
:background-color #f6f6f6
body
:background-color white
:margin 0 auto
:width 650px
#breadcrumbs, #content, #footer
:background-color white
:clear both
:float left
:overflow hidden
:padding 0 20px
:width= 650px - (20px + 20px)
#breadcrumbs
:border-bottom 2px solid #f6f6f6
:height 28px
:margin 0
:padding 0
:width 650px
li
:float left
:list-style none
:margin 0
:padding 0
a
:display block
:float left
:padding 0 8px
&.last
:padding 0 8px
&.not-last:after
:content "»"
:float right
#footer
:border-top 8px solid #f6f6f6
:padding-top 13px
.clear
:clear both
:border-width 0
:margin 0
:visibility hidden
body.listing
table#pages, table#subdirs, table#files
tr
:border-bottom 1px solid #777
:border-top 1px solid #777
td
:border none
&.size
:background-color #f6f6f6
&.name
:padding 0
a
:display block
:margin 0
:padding 4px 8px
// Common elements
blockquote
:background-color #f6f6f6
:padding 13px
:padding-bottom 1px
hr
:border-style solid
:border none
:border-top 1px solid #777
:margin 28px 0
// Lists
dl
:margin-left 0
dd
:margin-bottom 13px
:margin-left 13px
ul
:margin-top 0
li
:list-style square outside
ul
:margin-bottom 0
// Code blocks
=codeblock
:border-left 1px solid gray
:margin-bottom 13px
:margin-left 30px
:padding-left 12px
pre
+codeblock
.codehilite
+codeblock
pre
:border none
:margin 0
:padding 0
.codehilitetable
:margin-left 0
:padding-left 0
tr td
:border none
:padding 3px 5px 0 5px
&.linenos
:background-color #f6f6f6
:border-right 1px solid gray
:margin 0
:padding-right 6px
:text-align right
:width= 30px - (5px + 6px)
.linenodiv pre
:border none
:margin 0
:padding 0
&.code
:margin 0
:padding-left 12px
.codehilite
:border none
:margin 0
:padding 0
@import _layout.sass
@import _typography.sass
!monospace_fonts_block = "Menlo", "'DejaVu Sans Mono'", "'Bitstream Vera Sans Mono'", "Courier", "'Courier 10 Pitch'", "'Courier New'", "monospace"
!monospace_fonts_inline = "Courier", "'Courier 10 Pitch'", "'Courier New'", "monospace"
!sans_serif_fonts = "'Helvetica Neue'", "Helvetica", "Arial", "Geneva", "sans-serif"
body
:font-family= !sans_serif_fonts
:line-height 21px
#breadcrumbs
li
:color #aaa
:font-size 13px
:font-weight bold
:line-height 28px
a
:text-decoration none
.list-crumb
:font-weight normal
#footer
:color #777
:font-size 13px
:text-transform lowercase
&.listing
table#pages, table#subdirs
td.size
:font-family= !monospace_fonts_block
:text-align right
table#subdirs
td.name
:font-family= !monospace_fonts_inline
// Headers
h1, h2, h3, h4, h5, h6
:line-height 21px
h1
:font-size 21px
h2
:font-size 18px
h3
:font-size 15px
h4, h5, h6
:font-size 13px
// Links
!link_color = #900
a
:color= !link_color
:text-decoration none
&:hover
:color= !link_color * 0.5
&[href^="http:"]
:text-decoration underline
// Lists
dl
dt
:font-weight bold
// Code
code
:font-family= !monospace_fonts_inline
:line-height 18px
pre
:font-family= !monospace_fonts_block
:font-size 11px
:line-height 18px
{% extends 'markdoc-default/404.html' %}
{% extends 'markdoc-default/document.html' %}
{% extends 'markdoc-default/listing.html' %}
{% extends 'markdoc-default/base.html' %}
{% macro crumbs(breadcrumbs) %}
{% if breadcrumbs %}
<ol id="breadcrumbs">
{% for name, href in breadcrumbs %}
<li class="crumb-{{ loop.index0 }} {% if loop.last %}last{% else %}not-last{% endif %}">
{% if not href %}
{{ name|e }}
{% else %}
<a href="{{ make_relative(href)|e }}">{{ name|e }}</a>
{% endif %}
</li>
{% endfor %}
</ol> <!-- ol#breadcrumbs -->
{% endif %}
{% endmacro %}
{% macro cssimport(css_href, media="screen, projection") -%}
<link rel="stylesheet" type="text/css" href="{{ css_href }}" {% if media %}media="{{ media }}" {% endif %}/>
{%- endmacro %}
{% macro css() -%}
<style type="text/css">
{{ caller() }}
</style>
{%- endmacro %}
{% macro jsimport(js_href) -%}
<script type="application/javascript" src="{{ js_href }}"></script>
{%- endmacro %}
{% macro js() -%}
<script type="text/javascript">
{{ caller() }}
</script>
{%- endmacro %}
{% extends 'base.html' %}
{% block title %}Not Found: {{ request.path_info|e }}{% endblock %}
{% block content %}
<h1>Not Found: <code>{{ request.path_info|e }}</code></h1>
<p>
We couldn’t find what you were looking for.
{% if not is_index %}You could try going <a href="/">home</a>.{% endif %}
</p>
{% endblock %}
{% extends 'base.html' %}
{% block content %}{{ content }}{% endblock %}
{% extends 'base.html' %}
{% block title %}ls /{{ directory|e }}{% endblock %}
{% block body_attrs %}class="listing"{% endblock %}
{% block content %}
<h1><code>ls /{{ directory|e }}</code></h1>
{% if sub_directories %}
<h2>Directories</h2>
<table id="subdirs">
{% for subdir in sub_directories %}
<tr>
<td class="name">
<a class="dirlink" href="{{ make_relative(subdir.href)|e }}">
{{ subdir.basename|e }}/
</a>
</td>
</tr>
{% endfor %}
</table>
{% endif %}
{% if pages %}
<h2>Pages</h2>
<table id="pages">
{% for page in pages %}
<tr>
<td class="size">{{ page.humansize }}</td>
<td class="name">
<a href="{{ make_relative(page.href)|e }}" title="{{ page.title|e }}">
{{ page.title|e }}
</a>
</td>
</tr>
{% endfor %}
</table>
{% endif %}
{% if files %}
<h2>Files</h2>
<table id="files">
{% for file in files %}
<tr>
<td class="size">{{ file.humansize }}</td>
<td class="name">
<a href="{{ make_relative(file.href)|e }}">
<code>{{ file.basename|e }}</code>
</a>
</td>
</tr>
{% endfor %}
</table>
{% endif %}
{% endblock %}
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML+RDFa 1.0//EN"
"http://www.w3.org/MarkUp/DTD/xhtml-rdfa-1.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en"
{% block xmlns -%}
xmlns:rdf="http://www.w3.org/1999/02/22-rdf-syntax-ns#"
xmlns:rdfs="http://www.w3.org/2000/01/rdf-schema#"
xmlns:dc="http://purl.org/dc/elements/1.1/"
xmlns:foaf="http://xmlns.com/foaf/0.1/"
{%- endblock %}>
{% import "macros/html" as html -%}
{% import "macros/crumbs" as breadcrumbs with context -%}
<head>
{% block head %}
{% block meta %}
<meta http-equiv="Content-Type" content="application/xhtml+xml; charset=UTF-8" />
{% endblock %}
<title>
{% block title_prefix -%}
{% if 'wiki-name' in config %}{{ config['wiki-name']|e }} » {% endif %}
{%- endblock %}
{% block title -%}
{{ title }}
{%- endblock %}
</title>
{% block css %}
<!-- YUI CSS reset, fonts, base -->
{{ html.cssimport(("http://yui.yahooapis.com/combo?" +
"3.0.0/build/cssreset/reset-min.css&" +
"3.0.0/build/cssfonts/fonts-min.css&" +
"3.0.0/build/cssbase/base-min.css") | e) }}
{{ html.cssimport(make_relative("/media/css/style.css")) }}
{{ html.cssimport(make_relative("/media/css/pygments.css")) }}
{% endblock %}
{% block js %}{% endblock %}
{% block analytics %}
{% if 'google-analytics' in config %}
<!-- Google Analytics -->
<script type="text/javascript">
var _gaq = _gaq || [];
_gaq.push(['_setAccount', '{{ config['google-analytics'] }}']);
_gaq.push(['_trackPageview']);
(function() {
var ga = document.createElement('script'); ga.type = 'text/javascript'; ga.async = true;
ga.src = ('https:' == document.location.protocol ? 'https://ssl' : 'http://www') + '.google-analytics.com/ga.js';
(document.getElementsByTagName('head')[0] || document.getElementsByTagName('body')[0]).appendChild(ga);
})();
</script>
{% endif %}
{% endblock analytics %}
{% endblock head %}
</head>
<body {% block body_attrs %}{% endblock %}>
{% block body %}
{% block body_header %}
{% block crumbs %}
{{ breadcrumbs.crumbs(crumbs) }}
{% endblock crumbs %}
{% endblock body_header %}
<div id="content">
{% block content_header %}
{% endblock content_header %}
{% block content %}
{% endblock content %}
{% block content_footer %}
{% endblock content_footer %}
<hr class="clear" />
</div> <!-- div#content -->
{% block body_footer %}
<div id="footer">
<p>
{% if 'wiki-name' in config %}
{{ config['wiki-name']|e }} —
{% endif %}
Powered by <a href="http://markdoc.org/">Markdoc</a>.
</p>
</div>
{% endblock body_footer %}
{% endblock body %}
<hr class="clear" />
</body>
</html>
# -*- coding: utf-8 -*-
import os
import argparse
import markdoc
parser = argparse.ArgumentParser(**{
'prog': 'markdoc',
'description': 'A lightweight Markdown-based wiki build tool.',
})
parser.add_argument('-v', '--version', action='version',
version=markdoc.__version__)
config = parser.add_argument('--config', '-c', default=os.getcwd(),
help="Use the specified Markdoc config (a YAML file or a directory "
"containing markdoc.yaml)")
log_level = parser.add_argument('--log-level', '-l', metavar='LEVEL',
default='INFO', choices='DEBUG INFO WARN ERROR'.split(),
help="Choose a log level from DEBUG, INFO (default), WARN or ERROR")
quiet = parser.add_argument('--quiet', '-q',
action='store_const', dest='log_level', const='ERROR',
help="Alias for --log-level ERROR")
verbose = parser.add_argument('--verbose',
action='store_const', dest='log_level', const='DEBUG',
help="Alias for --log-level DEBUG")
subparsers = parser.add_subparsers(dest='command', title='commands', metavar='COMMAND')
# -*- coding: utf-8 -*-
import logging
import os
from markdoc.cli import commands
from markdoc.cli.parser import parser
from markdoc.config import Config, ConfigNotFound
def main(cmd_args=None):
"""The main entry point for running the Markdoc CLI."""
if cmd_args is not None:
args = parser.parse_args(cmd_args)
else:
args = parser.parse_args()
if args.command != 'init':
try:
args.config = os.path.abspath(args.config)
if os.path.isdir(args.config):
config = Config.for_directory(args.config)
elif os.path.isfile(args.config):
config = Config.for_file(args.config)
else:
raise ConfigNotFound("Couldn't locate Markdoc config.")
except ConfigNotFound, exc:
parser.error(str(exc))
else:
config = None
logging.getLogger('markdoc').setLevel(getattr(logging, args.log_level))
command = getattr(commands, args.command.replace('-', '_'))
return command(config, args)
if __name__ == '__main__':
main()
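# The CLI can also be driven programmatically (illustrative arguments):
#
#     main(['--config', '/path/to/wiki', 'build'])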
# -*- coding: utf-8 -*-
import codecs
from functools import wraps
import logging
import os
import os.path as p
import pprint
import re
import shutil
import subprocess
import sys
import markdoc
from markdoc.builder import Builder
from markdoc.cli.parser import subparsers
def command(function):
"""Decorator/wrapper to declare a function as a Markdoc CLI task."""
cmd_name = function.__name__.replace('_', '-')
    help_text = (function.__doc__ or '').rstrip('.') or None
    parser = subparsers.add_parser(cmd_name, help=help_text)
@wraps(function)
def wrapper(config, args):
logging.getLogger('markdoc').debug('Running markdoc.%s' % cmd_name)
return function(config, args)
wrapper.parser = parser
return wrapper
## Utilities
@command
def show_config(config, args):
"""Pretty-print the current Markdoc configuration."""
pprint.pprint(config)
@command
def init(_, args):
"""Initialize a new Markdoc repository."""
log = logging.getLogger('markdoc.init')
if not args.destination:
log.info('No destination specified; using current directory')
destination = os.getcwd()
else:
destination = p.abspath(args.destination)
    if p.exists(destination) and not p.isdir(destination):
        init.parser.error("destination isn't a directory")
    elif p.exists(destination) and os.listdir(destination):
        init.parser.error("destination isn't empty")
    elif not p.exists(destination):
        log.debug('makedirs %s' % destination)
        os.makedirs(destination)
log.debug('mkdir %s/.templates/' % destination)
os.makedirs(p.join(destination, '.templates'))
log.debug('mkdir %s/static/' % destination)
os.makedirs(p.join(destination, 'static'))
log.debug('mkdir %s/wiki/' % destination)
os.makedirs(p.join(destination, 'wiki'))
log.debug('Creating markdoc.yaml file')
config_filename = p.join(destination, 'markdoc.yaml')
fp = open(config_filename, 'w')
try:
fp.write('{}\n')
finally:
fp.close()
if args.vcs_ignore:
config = markdoc.config.Config.for_directory(destination)
args = vcs_ignore.parser.parse_args([args.vcs_ignore])
vcs_ignore(config, args)
log.info('Wiki initialization complete')
log.info('Your new wiki is at: %s' % destination)
init.parser.add_argument('destination', default=None, nargs='?',
    help="Create wiki here (if omitted, defaults to current directory)")
init.parser.add_argument('--vcs-ignore', choices=['hg', 'git', 'cvs', 'bzr'],
help="Create an ignore file for the specified VCS.")
@command
def vcs_ignore(config, args):
"""Create a VCS ignore file for a wiki."""
log = logging.getLogger('markdoc.vcs-ignore')
log.debug('Creating ignore file for %s' % args.vcs)
wiki_root = config['meta.root'] # shorter local alias.
ignore_file_lines = []
ignore_file_lines.append(p.relpath(config.html_dir, start=wiki_root))
ignore_file_lines.append(p.relpath(config.temp_dir, start=wiki_root))
if args.vcs == 'hg':
ignore_file_lines.insert(0, 'syntax: glob')
ignore_file_lines.insert(1, '')
if args.output == '-':
log.debug('Writing ignore file to stdout')
fp = sys.stdout
else:
if not args.output:
filename = p.join(wiki_root, '.%signore' % args.vcs)
else:
filename = p.join(wiki_root, args.output)
log.info('Writing ignore file to %s' % p.relpath(filename, start=wiki_root))
fp = open(filename, 'w')
try:
fp.write('\n'.join(ignore_file_lines) + '\n')
finally:
if fp is not sys.stdout:
fp.close()
log.debug('Ignore file written.')
vcs_ignore.parser.add_argument('vcs', default='hg', nargs='?',
choices=['hg', 'git', 'cvs', 'bzr'],
help="Create ignore file for specified VCS (default 'hg')")
vcs_ignore.parser.add_argument('-o', '--output', default=None, metavar='FILENAME',
help="Write output to the specified filename, relative to the wiki root. "
"Default is to generate the filename from the VCS. "
"'-' will write to stdout.")
## Cleanup
@command
def clean_html(config, args):
"""Clean built HTML from the HTML root."""
log = logging.getLogger('markdoc.clean-html')
if p.exists(config.html_dir):
log.debug('rm -Rf %s' % config.html_dir)
shutil.rmtree(config.html_dir)
log.debug('makedirs %s' % config.html_dir)
os.makedirs(config.html_dir)
@command
def clean_temp(config, args):
"""Clean built HTML from the temporary directory."""
log = logging.getLogger('markdoc.clean-temp')
if p.exists(config.temp_dir):
log.debug('rm -Rf %s' % config.temp_dir)
shutil.rmtree(config.temp_dir)
log.debug('makedirs %s' % config.temp_dir)
os.makedirs(config.temp_dir)
## Synchronization
@command
def sync_static(config, args):
"""Sync static files into the HTML root."""
log = logging.getLogger('markdoc.sync-static')
if not p.exists(config.html_dir):
log.debug('makedirs %s' % config.html_dir)
os.makedirs(config.html_dir)
command = ('rsync -vaxq --cvs-exclude --ignore-errors --include=.htaccess --exclude=.* --exclude=_*').split()
display_cmd = command[:]
if config['use-default-static']:
# rsync needs the paths to have trailing slashes to work correctly.
command.append(p.join(markdoc.default_static_dir, ''))
display_cmd.append(p.basename(markdoc.default_static_dir) + '/')
if not config['cvs-exclude']:
command.remove('--cvs-exclude')
display_cmd.remove('--cvs-exclude')
if p.isdir(config.static_dir):
command.append(p.join(config.static_dir, ''))
display_cmd.append(p.basename(config.static_dir) + '/')
command.append(p.join(config.html_dir, ''))
display_cmd.append(p.basename(config.html_dir) + '/')
log.debug(subprocess.list2cmdline(display_cmd))
subprocess.check_call(command)
log.debug('rsync completed')
@command
def sync_html(config, args):
"""Sync built HTML and static media into the HTML root."""
log = logging.getLogger('markdoc.sync-html')
if not p.exists(config.html_dir):
log.debug('makedirs %s' % config.html_dir)
os.makedirs(config.html_dir)
command = ('rsync -vaxq --cvs-exclude --delete --ignore-errors --include=.htaccess --exclude=.* --exclude=_*').split()
display_cmd = command[:]
# rsync needs the paths to have trailing slashes to work correctly.
command.append(p.join(config.temp_dir, ''))
display_cmd.append(p.basename(config.temp_dir) + '/')
if config['use-default-static']:
command.append(p.join(markdoc.default_static_dir, ''))
display_cmd.append(p.basename(markdoc.default_static_dir) + '/')
if not config['cvs-exclude']:
command.remove('--cvs-exclude')
display_cmd.remove('--cvs-exclude')
if p.isdir(config.static_dir):
command.append(p.join(config.static_dir, ''))
display_cmd.append(p.basename(config.static_dir) + '/')
command.append(p.join(config.html_dir, ''))
display_cmd.append(p.basename(config.html_dir) + '/')
log.debug(subprocess.list2cmdline(display_cmd))
subprocess.check_call(command)
log.debug('rsync completed')
## Building
@command
def build(config, args):
"""Compile wiki to HTML and sync to the HTML root."""
log = logging.getLogger('markdoc.build')
clean_temp(config, args)
builder = Builder(config)
for rel_filename in builder.walk():
html = builder.render_document(rel_filename)
out_filename = p.join(config.temp_dir,
p.splitext(rel_filename)[0] + p.extsep + 'html')
if not p.exists(p.dirname(out_filename)):
log.debug('makedirs %s' % p.dirname(out_filename))
os.makedirs(p.dirname(out_filename))
log.debug('Creating %s' % p.relpath(out_filename, start=config.temp_dir))
fp = codecs.open(out_filename, 'w', encoding='utf-8')
try:
fp.write(html)
finally:
fp.close()
sync_html(config, args)
build_listing(config, args)
@command
def build_listing(config, args):
"""Create listings for all directories in the HTML root (post-build)."""
log = logging.getLogger('markdoc.build-listing')
list_basename = config['listing-filename']
builder = Builder(config)
generate_listing = config.get('generate-listing', 'always').lower()
if generate_listing == 'never':
log.debug("No listing generated (generate-listing == never)")
return # No need to continue.
for fs_dir, _, _ in os.walk(config.html_dir):
index_file_exists = any([
p.exists(p.join(fs_dir, 'index.html')),
p.exists(p.join(fs_dir, 'index'))])
directory = '/' + '/'.join(p.relpath(fs_dir, start=config.html_dir).split(p.sep))
if directory == '/' + p.curdir:
directory = '/'
if (generate_listing == 'sometimes') and index_file_exists:
log.debug("No listing generated for %s" % directory)
continue
log.debug("Generating listing for %s" % directory)
listing = builder.render_listing(directory)
list_filename = p.join(fs_dir, list_basename)
fp = codecs.open(list_filename, 'w', encoding='utf-8')
try:
fp.write(listing)
finally:
fp.close()
if not index_file_exists:
log.debug("cp %s/%s %s/%s" % (directory, list_basename, directory, 'index.html'))
shutil.copyfile(list_filename, p.join(fs_dir, 'index.html'))
## Serving
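# Matches dotted-quad IPv4 addresses, constraining each octet to 0-255.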
IPV4_RE = re.compile(r'^(25[0-5]|2[0-4]\d|[0-1]?\d?\d)(\.(25[0-5]|2[0-4]\d|[0-1]?\d?\d)){3}$')
@command
def serve(config, args):
"""Serve the built HTML from the HTML root."""
# This should be a lazy import, otherwise it'll slow down the whole CLI.
from markdoc.wsgi import MarkdocWSGIApplication
log = logging.getLogger('markdoc.serve')
app = MarkdocWSGIApplication(config)
config['server.port'] = args.port
config['server.num-threads'] = args.num_threads
if args.server_name:
config['server.name'] = args.server_name
config['server.request-queue-size'] = args.queue_size
config['server.timeout'] = args.timeout
if args.interface:
if not IPV4_RE.match(args.interface):
serve.parser.error('invalid interface specifier: %r' % args.interface)
config['server.bind'] = args.interface
server = config.server_maker()(app)
try:
log.info('Serving on http://%s:%d' % server.bind_addr)
server.start()
except KeyboardInterrupt:
log.debug('Interrupted')
finally:
log.info('Shutting down gracefully')
server.stop()
serve.parser.add_argument('-p', '--port', type=int, default=8008,
help="Listen on specified port (default is 8008)")
serve.parser.add_argument('-i', '--interface', default=None,
help="Bind to specified interface (defaults to loopback only)")
serve.parser.add_argument('-t', '--num-threads', type=int, default=10, metavar='N',
help="Use N threads to handle requests (default is 10)")
serve.parser.add_argument('-n', '--server-name', default=None, metavar='NAME',
help="Use an explicit server name (default to an autodetected value)")
serve.parser.add_argument('-q', '--queue-size', type=int, default=5, metavar='SIZE',
help="Set request queue size (default is 5)")
serve.parser.add_argument('--timeout', type=int, default=10,
help="Set the socket timeout for connections (default is 10)")
# -*- coding: utf-8 -*-
from builder_fixture import setup_test, teardown_test
# -*- coding: utf-8 -*-
from builder_fixture import setup_test, teardown_test
# -*- coding: utf-8 -*-
import os.path as p
from common import get_temporary_config, clean_temporary_config
def setup_test(test):
test.globs['CONFIG'] = get_temporary_config()
test.globs['WIKI_ROOT'] = p.join(test.globs['CONFIG']['meta.root'], '')
def teardown_test(test):
clean_temporary_config(test.globs['CONFIG'])
>>> import os.path as p
>>> from markdoc.builder import Builder, get_title
>>> b = Builder(CONFIG)
Filesystem Interaction
======================
You can generate breadcrumbs for a document, based on its absolute path (which will be relativized to the wiki root):
>>> b.crumbs(p.join(WIKI_ROOT, 'wiki', 'index.md'))
[('index', None)]
>>> b.crumbs(p.join(WIKI_ROOT, 'wiki', 'somefile.md'))
[('index', '/'), ('somefile', None)]
>>> b.crumbs(p.join(WIKI_ROOT, 'wiki', 'somedir', 'index.md'))
[('index', '/'), ('somedir', None)]
>>> b.crumbs(p.join(WIKI_ROOT, 'wiki', 'somedir', 'something.md'))
[('index', '/'), ('somedir', '/somedir/'), ('something', None)]
You can also just use relative paths (relative to `WIKI_ROOT/wiki`):
>>> b.crumbs('index.md')
[('index', None)]
>>> b.crumbs('somefile.md')
[('index', '/'), ('somefile', None)]
>>> b.crumbs(p.join('somedir', 'index.md'))
[('index', '/'), ('somedir', None)]
>>> b.crumbs(p.join('somedir', 'something.md'))
[('index', '/'), ('somedir', '/somedir/'), ('something', None)]
You can also walk through all files in the wiki, using the `walk()` method:
>>> import os
>>> for filepath in b.walk():
... print filepath
an_empty_file.md
file1.md
file2.md
file3.md
subdir/hello.md
Rendering
=========
You can render documents with the `Builder.render()` method:
>>> b.render('file1.md')
u'<h1>Hello</h1>'
>>> b.render('file2.md')
u'<h1>World</h1>'
>>> b.render('file3.md')
u'<h1>Foo</h1>'
Titles
------
The `get_title()` function deals with retrieving titles from files. Initially, the file content itself is used to try to get a title. Either text in a specially formatted HTML comment, like this:
>>> get_title(None, '<!-- title: Some Title -->')
'Some Title'
Or the first `<h1>` element:
>>> get_title(None, '<h1>Some Title</h1>')
'Some Title'
If those both fail, the filename is used to produce a title:
>>> get_title('some_title.arbitraryextension', '')
'Some Title'
Since this is linked to the documents themselves, you can use the `title()` method on the builder instance to get the title for a given document:
>>> b.title('file1.md')
u'Hello'
>>> b.title('file2.md')
u'World'
>>> b.title('file3.md')
u'Foo'
>>> b.title('an_empty_file.md')
'An Empty File'
Documents
---------
You can render whole documents using `Builder.render_document()`:
>>> print b.render_document('file1.md')
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>Hello</title>
<link rel="stylesheet" type="text/css" href="/example.css" />
</head>
<body>
<h1>Hello</h1>
</body>
</html>
>>> print b.render_document('file2.md')
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>World</title>
<link rel="stylesheet" type="text/css" href="/example.css" />
</head>
<body>
<h1>World</h1>
</body>
</html>
This uses the `document.html` Jinja2 template, by default located in `WIKI_ROOT/.templates/`, to produce the documents.
Set up the document cache with a 'root' directory:
>>> import os
>>> from markdoc.cache import DocumentCache
>>> cache = DocumentCache(base=CONFIG.wiki_dir)
You can fetch files from the document cache:
>>> cache.get('file1.md')
u'# Hello\n'
>>> cache.get('file2.md')
u'# World\n'
>>> cache.get('file3.md')
u'# Foo\n'
The contents of these files will then be cached:
>>> print sorted(cache.relative(path) for path in cache.cache)
['file1.md', 'file2.md', 'file3.md']
You can also fetch them without using the cache:
>>> cache.cache.clear()
>>> cache.cache
{}
>>> cache.get('file1.md', cache=False)
u'# Hello\n'
>>> cache.get('file2.md', cache=False)
u'# World\n'
>>> cache.get('file3.md', cache=False)
u'# Foo\n'
>>> cache.cache
{}
You can force the document cache to refresh its cached content for each of these files:
>>> cache.refresh_cache('file1.md')
>>> cache.refresh_cache('file2.md')
>>> cache.refresh_cache('file3.md')
>>> len(cache.cache)
3
And then make sure they actually *have* updated to the latest version:
>>> cache.has_latest_version('file1.md')
True
>>> cache.has_latest_version('file2.md')
True
>>> cache.has_latest_version('file3.md')
True
To generate listings, we first need to build the wiki.
>>> from markdoc.cli.main import main
>>> try:
... main(['-c', WIKI_ROOT, '--quiet', 'build'])
... except SystemExit, exc:
... assert exc.code == 0, "An error occurred building the wiki"
The wiki should now be built in the HTML root:
>>> import os
>>> import os.path as p
>>> print '\n'.join(sorted(os.listdir(CONFIG.html_dir)))
_list.html
an_empty_file.html
example.css
file1.html
file2.html
file3.html
index.html
subdir
Now we can try getting some listings. The bulk of the process occurs in the `Builder.listing_context()` method, which builds a template context dictionary for rendering the Jinja2 `listing.html` template. First we have to get a `Builder` instance:
>>> from markdoc.builder import Builder
>>> b = Builder(CONFIG)
Now we can get the listing for the HTML root itself, just by passing the empty string:
>>> import pprint
>>> pprint.pprint(b.listing_context('')) # doctest: +ELLIPSIS
{'directory': '',
'files': [{'basename': 'example.css',
'href': '/example.css',
'humansize': '27B',
'size': 27,
'slug': 'example'}],
'make_relative': <function <lambda> at 0x...>,
'pages': [{'basename': 'an_empty_file.html',
'href': '/an_empty_file',
'humansize': '248B',
'size': 248,
'slug': 'an_empty_file',
'title': 'An Empty File'},
{'basename': 'file3.html',
'href': '/file3',
'humansize': '250B',
'size': 250,
'slug': 'file3',
'title': u'Foo'},
{'basename': 'file1.html',
'href': '/file1',
'humansize': '254B',
'size': 254,
'slug': 'file1',
'title': u'Hello'},
{'basename': 'file2.html',
'href': '/file2',
'humansize': '254B',
'size': 254,
'slug': 'file2',
'title': u'World'}],
'sub_directories': [{'basename': 'subdir', 'href': '/subdir/'}]}
We can also get the listings for subdirectories, by passing in their paths, relative to the HTML root:
>>> pprint.pprint(b.listing_context('/subdir')) # doctest: +ELLIPSIS
{'directory': 'subdir',
'files': [],
'make_relative': <function <lambda> at 0x...>,
'pages': [{'basename': 'hello.html',
'href': '/subdir/hello',
'humansize': '268B',
'size': 268,
'slug': 'hello',
'title': u'Hello again.'}],
'sub_directories': []}
Note that these paths are always '/'-delimited, since they are taken to be URL paths and not filesystem paths.
>>> from markdoc.config import Config
>>> EXAMPLE_KEY = '9d03e32c6cb1455388a170e3bb0c2030'
>>> EXAMPLE_VALUE = '7010d5d5871143d089cb662cb540cbd5'
You can make a config based on a directory (which will usually be a wiki root):
>>> dir_config = Config.for_directory(WIKI_ROOT)
>>> dir_config[EXAMPLE_KEY] == EXAMPLE_VALUE
True
You can also make one based on a YAML filename:
>>> import os.path as p
>>> file_config = Config.for_file(p.join(WIKI_ROOT, 'markdoc.yaml'))
>>> file_config[EXAMPLE_KEY] == EXAMPLE_VALUE
True
And if you like, you can make a complete config manually:
>>> config_dict = {
... 'meta': {'root': WIKI_ROOT},
... EXAMPLE_KEY: EXAMPLE_VALUE
... }
>>> manual_config = Config(p.join(WIKI_ROOT, 'markdoc.yaml'), config_dict)
>>> manual_config[EXAMPLE_KEY] == EXAMPLE_VALUE
True
First we have to set up a Python function for running the command-line tool. Since the main entry point is the `markdoc.cli.main.main()` function, we can just call that directly, with some defaults for the current wiki root:
>>> import os
>>> import os.path as p
>>> from markdoc.cli.main import main
>>> def markdoc(*args):
... try:
... main(['-c', WIKI_ROOT] + list(args))
... except SystemExit, exc:
... return exc.code
... return 0
The `show-config` command will show the current Markdoc config:
>>> exit_code = markdoc('show-config') # doctest: +ELLIPSIS
{'9d03e32c6cb1455388a170e3bb0c2030': '7010d5d5871143d089cb662cb540cbd5',
'hide-prefix': '_',
'meta.config-file': '.../example/markdoc.yaml',
'meta.root': '.../example',
'use-default-static': False,
'use-default-templates': False}
>>> exit_code
0
`sync-static` will compile the wiki's static media into the HTML root:
>>> markdoc('--quiet', 'sync-static') # doctest: +ELLIPSIS
0
If you leave out the `--quiet` option, Markdoc will print some additional logging information, including the `rsync` command that's run in the background; the rsync output itself will also be written to the terminal.
The static media will now be in the HTML root:
>>> os.listdir(CONFIG.html_dir)
['example.css']
There are three other commands -- `clean-html`, `clean-temp`, and `sync-html` -- which are used in the background by the `build` task. `markdoc build` will compile all the HTML, and then synchronize both built HTML and static media into the HTML root (usually `WIKI_ROOT/.html`).
>>> markdoc('--quiet', 'build')
0
>>> print '\n'.join(sorted(os.listdir(CONFIG.html_dir)))
_list.html
an_empty_file.html
example.css
file1.html
file2.html
file3.html
index.html
subdir
# -*- coding: utf-8 -*-
from builder_fixture import setup_test, teardown_test
# -*- coding: utf-8 -*-
import os.path as p
import shutil
import tempfile
from markdoc.config import Config
def get_temporary_config():
"""
Return a temporary Markdoc configuration.
The contents of the wiki will be copied from the example Markdoc wiki. After
you're done with this, you should call `clean_temporary_config()` on the
config object.
"""
own_config_dir = p.join(p.dirname(p.abspath(__file__)), 'example') + p.sep
temp_config_dir = p.join(tempfile.mkdtemp(), 'example')
shutil.copytree(own_config_dir, temp_config_dir)
return Config.for_directory(temp_config_dir)
def clean_temporary_config(config):
"""Delete a temporary configuration's wiki root."""
shutil.rmtree(p.dirname(config['meta.root']))
# -*- coding: utf-8 -*-
import os.path as p
def setup_test(test):
test.globs['WIKI_ROOT'] = p.join(p.dirname(p.abspath(__file__)), 'example')
hide-prefix: "_"
use-default-templates: no
use-default-static: no
9d03e32c6cb1455388a170e3bb0c2030: 7010d5d5871143d089cb662cb540cbd5
# World
# Hello
# Foo
# Hello again.
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>{{ title }}</title>
<link rel="stylesheet" type="text/css" href="/example.css" />
</head>
<body>
{{ content }}
</body>
</html>
<?xml version="1.0" encoding="utf-8"?>
<!DOCTYPE html>
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en">
<head>
<title>ls /{{ directory }}</title>
<link rel="stylesheet" type="text/css" href="/example.css" />
</head>
<body>
<h1>Listing for /{{ directory }}</h1>
{% if sub_directories %}
<h2>Sub-directories</h2>
<ul id="subdirs">
{% for sub_dir in sub_directories %}
<li><a href="{{ sub_dir.href }}">{{ sub_dir.basename }}</a></li>
{% endfor %}
</ul>
{% endif %}
{% if pages %}
<h2>Pages</h2>
<ul id="pages">
{% for page in pages %}
<li><a href="{{ page.href }}">{{ page.title }}</a></li>
{% endfor %}
</ul>
{% endif %}
{% if files %}
<h2>Files</h2>
<ul id="files">
{% for file in files %}
<li><a href="{{ file.href }}">{{ file.basename }}</a></li>
{% endfor %}
</ul>
{% endif %}
</body>
</html>
/* Nothing to see here. */
# About Markdoc
Markdoc is a project which aims to provide a lightweight alternative to large
database-powered wiki systems. I’ve listed the main goals of the project below;
I believe that, in its current state, it meets all of these.
## Goals & Philosophy
### Wikis
* Wikis should be made up of plain-text files, without requiring a running
instance of MySQL or even an SQLite database.
* There should only be one simple-to-write plain-text configuration file.
* Wikis should be VCS-friendly, yet VCS-agnostic.
* It should be possible to compile a wiki to static HTML, and then to serve
this HTML with no wiki-specific software.
### Markdown
I chose Markdown as the format for this wiki system because of its simplicity,
familiarity for many writers, and the extensibility of its Python
implementation. For example, Pygments syntax highlighting is available through a
single configuration option in the `markdoc.yaml` file. The ability to embed raw
HTML in Markdown documents gives it power and flexibility.
### Command-Line Interface
* The CLI should be intuitive and easy to use.
* There should only be a few different sub-commands, each of which does what
one would expect it to.
* There should be a full web server included, in the case that setting up a
large-scale HTTP server is impractical or impossible.
* The CLI should be pure-Python, for portability and extensibility.
## License
Markdoc is [public domain software](http://unlicense.org/).
> This is free and unencumbered software released into the public domain.
>
> Anyone is free to copy, modify, publish, use, compile, sell, or distribute
> this software, either in source code form or as a compiled binary, for any
> purpose, commercial or non-commercial, and by any means.
>
> In jurisdictions that recognize copyright laws, the author or authors of this
> software dedicate any and all copyright interest in the software to the public
> domain. We make this dedication for the benefit of the public at large and to
> the detriment of our heirs and successors. We intend this dedication to be an
> overt act of relinquishment in perpetuity of all present and future rights to
> this software under copyright law.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
> AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
> ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
> WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
>
> For more information, please refer to <http://unlicense.org/>
The bundled Pygments style (it’s the [Github][] syntax highlighting theme) was
created by [Tom Preston-Werner][]; it was sourced from [his blog][] and is
licensed under the MIT License:
[github]: http://github.com/
[tom preston-werner]: http://tom.preston-werner.com/
[his blog]: http://github.com/mojombo/tpw/
> Copyright © 2010 Tom Preston-Werner
>
> Permission is hereby granted, free of charge, to any person
> obtaining a copy of this software and associated documentation
> files (the "Software"), to deal in the Software without
> restriction, including without limitation the rights to use,
> copy, modify, merge, publish, distribute, sublicense, and/or sell
> copies of the Software, and to permit persons to whom the
> Software is furnished to do so, subject to the following
> conditions:
>
> The above copyright notice and this permission notice shall be
> included in all copies or substantial portions of the Software.
>
> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
> EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
> OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
> NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
> HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
> WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
> FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
> OTHER DEALINGS IN THE SOFTWARE.
# Authoring
A wiki would be nothing without pages. In Markdoc, pages are written in
[Markdown][df-markdown], a plain-text formatting syntax designed by
[John Gruber][df]. In his own words:
[df]: http://daringfireball.net/
[df-markdown]: http://daringfireball.net/projects/markdown/
> Markdown allows you to write using an easy-to-read, easy-to-write plain text
> format, then convert it to structurally valid XHTML (or HTML).
>
> [...]
>
> The overriding design goal for Markdown’s formatting syntax is to
> make it as readable as possible. The idea is that a Markdown-formatted
> document should be publishable as-is, as plain text, without looking
> like it’s been marked up with tags or formatting instructions.
For a comprehensive guide to the Markdown syntax, consult the
[markup reference documentation](/ref/markup). The rest of this document will
cover the Markdoc-specific conventions and constraints related to writing wiki
pages.
## Linking
Every page in your wiki will likely link to several other pages. Markdoc does
not require any special syntax for internal links; the usual Markdown format
will work. An example of a link from this very page follows:
:::text
For a comprehensive guide to the Markdown syntax,
consult the [markup reference documentation](/ref/markup).
As you can see, the link href is an absolute path to the document, without any
extension. Markdoc will process this and convert it into a relative path when
rendering the corresponding HTML. This means that you can host Markdoc wikis
under sub-directories on a web server, and the links will still work properly.
If you split your wiki up into sub-directories (for example, in this wiki, there
is an `internals/` directory), the pattern remains the same. A link to the
[internals/rendering](/internals/rendering) document looks like this:
:::text
A link to the [internals/rendering](/internals/rendering) document.
Note that links will only be made relative if they begin with a `/` character;
for example, a link to `http://www.google.com/` will be left untouched.
<!-- title: Index -->
# Markdoc
Markdoc is a lightweight Markdown-based wiki system. It’s been designed to allow
you to create and manage wikis as quickly and easily as possible.
## What is it good for?
Potential use cases for Markdoc include, but aren’t limited to:
Technical Documentation/Manuals
: Markdoc can be used to write and render hand-written guides and manuals for
software. Such documentation will normally be separate from
automatically-generated API documentation, and might give a higher-level
view than API docs alone. It might be used for client documentation for
web/desktop applications, or even developer documentation for frameworks.
Internal Project Wikis
: Markdoc wikis consist of a single plain-text file per page. By combining a
wiki with a DVCS (such as [Mercurial][] or [Git][]), you can collaborate
with several other people. Use the DVCS to track, share and merge changes
with one another, and view the wiki’s history.
[Mercurial]: http://mercurial.selenic.com/
[Git]: http://git-scm.com/
Static Site Generation
: Markdoc converts wikis into raw HTML files and media. This allows you to
manage a blog, personal website or a collection of pages in a Markdoc wiki,
perhaps with custom CSS styles, and publish the rendered HTML to a website.
Markdoc need not be installed on the hosting site, since the resultant HTML
is completely independent.
## Cool Features
* Set up [Google Analytics][] tracking in one line of configuration.
* [Barebones][] wikis that just look like directories with Markdown-formatted
text files in them.
* A built-in HTTP server and WSGI application to serve up a compiled wiki with
a single command.
* Continuous builds (via `rsync`) mean the server can keep running whilst
Markdoc re-compiles the wiki. Just refresh your browser to see the changes.
* Add [Pygments][]-powered syntax highlighting to your Markdoc wiki with a
single [configuration parameter][syntax-highlighting].
* Markdoc is [public domain software][licensing]. It will always be completely
free to use, and you can redistribute it (in part or in whole) under any
circumstances (open-source, proprietary or otherwise) with no attribution or
encumbrances.
[google analytics]: /ref/configuration#metadata
[barebones]: /tips/barebones
[pygments]: http://pygments.org/
[syntax-highlighting]: /tips/syntax-highlighting
[licensing]: /about#license
## Where do I start?
The [quickstart](/quickstart) document has all the information you need to put
together a simple Markdoc wiki. The [authoring](/authoring) guide provides a
quick introduction to writing Markdoc pages themselves, especially with regards
to linking between pages.
## Reference
See the [configuration](/ref/configuration) reference for in-depth knowledge on
writing your `markdoc.yaml` file. The [layout](/ref/layout) reference describes
the basic filesystem layout for a Markdoc wiki, and the [tips](/tips/) directory
contains several handy recipes.
The Markdoc project’s goals and history are described in the [about](/about)
page. If you’d like to know more about how Markdoc works at a deeper level, take
a look at the [internals directory](/internals/). Developers interested in
hacking the utility will likely want the [development](/internals/development)
page.
To see the complete list of pages in this wiki, you can browse the
[directory listing](/_list).
# Quickstart
The first step towards using Markdoc is to install it. Luckily, it uses
setuptools, so you can install it with relative ease on any system with Python.
Note that most modern UNIX distributions come with a sufficiently recent version
of Python, including Mac OS X, Ubuntu (and derivatives) and Fedora.
## Requirements
The minimum requirements to run the Markdoc utility are:
* Python 2.4 or later (2.5+ highly recommended)
* A UNIX (or at least POSIX-compliant) operating system
* [rsync](http://www.samba.org/rsync/) — installed out of the box with most
modern OSes, including Mac OS X and Ubuntu. In the future Markdoc may
include a pure-Python implementation.
## Installation
You can use either `easy_install` or [pip][] to install Markdoc:
[pip]: http://pip.openplans.org/
:::bash
$ easy_install Markdoc # OR
$ pip install Markdoc
Note that you are likely to see a lot of scary-looking output from both
commands; nevertheless, you can tell whether installation was successful by
looking at the last line of the output. With `easy_install`, this should be:
:::text
Finished processing dependencies for Markdoc
And with `pip install`:
:::text
Successfully installed ... Markdoc ...
`pip` will list all of the packages it installed, and `Markdoc` should be
amongst them.
## Making a Wiki
### Initializing the Wiki
The `markdoc init` command creates a new wiki. It also accepts a `--vcs-ignore`
option which will automatically create the appropriate ignore file for your VCS.
:::bash
$ markdoc init my-wiki --vcs-ignore hg
markdoc.vcs-ignore: INFO: Writing ignore file to .hgignore
markdoc.init: INFO: Wiki initialization complete
markdoc.init: INFO: Your new wiki is at: .../my-wiki
If you’re using SVN, you have to take a few more steps to set the `svn:ignore`
property on the directory:
:::bash
$ markdoc init my-wiki --vcs-ignore cvs
markdoc.vcs-ignore: INFO: Writing ignore file to .cvsignore
markdoc.init: INFO: Wiki initialization complete
markdoc.init: INFO: Your new wiki is at: .../my-wiki
$ cd my-wiki/
$ svn propset svn:ignore -F .cvsignore .
$ rm .cvsignore
### Editing Pages
Documents in a Markdoc wiki are located under the `wiki/` subdirectory, and are
plain Markdown files. Typically documents have a `.md` file extension, but in
the [wiki configuration](/configuration#building) you can specify others.
:::bash
$ cd my-wiki/
$ vim wiki/somefile.md
# ... write some documents ...
### Building
Markdoc comes with a default set of templates and stylesheets, so you can build
your wiki right away. Just run `markdoc build`, and all the HTML will be
generated and output into the `.html/` sub-directory (known as the HTML root).
:::bash
$ markdoc build
### Serving
You can view all the HTML in a browser easily by running the built-in server.
`markdoc serve` accepts a large number of options, but on its own will serve
your documentation on port 8008.
:::bash
$ markdoc serve
markdoc.serve: INFO: Serving on http://127.0.0.1:8008
Now just open <http://localhost:8008/> in your browser, and see your new
Markdoc-powered wiki!
# Rendering
This page will describe the process which documents undergo on their way from Markdoc to XHTML.
## Step 1: Markdown Rendering
Each page is converted from Markdown to XHTML. This uses the Markdown library for Python, which comes with a number of extensions that you can enable in your [configuration](/configuration). For example, I like to use the `codehilite`, `def_list` and `headerid` Markdown extensions, but by default Markdoc wikis will not use any.
The Markdown conversion results in XHTML data which is not tied to a page, so it’s not enough to display to a browser. That’s where the templating comes in.
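To make step one concrete, here is a minimal sketch of converting a single page with the Python Markdown library and the extensions mentioned above. This is illustrative only: Markdoc builds its converter from the wiki configuration (and adds its own relative-link handling) rather than hard-coding an extension list.
:::python
import codecs
import markdown
# A sketch: build a converter with some optional extensions and render
# one page to partial XHTML.
md = markdown.Markdown(extensions=['codehilite', 'def_list', 'headerid'])
source = codecs.open('wiki/index.md', encoding='utf-8').read()
partial_xhtml = md.convert(source)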
## Step 2: Template Rendering
Markdoc uses [Jinja2][] to render this partial XHTML to full XHTML documents. Jinja2 is a fast and flexible templating system for Python; if you are interested in writing your own templates, it would be wise to first consult its official documentation.
[jinja2]: http://jinja2.pocoo.org
Markdoc expects only two templates to be defined: `document.html` and `listing.html`. The way it finds these is as follows:
* It first looks for them in the `.templates/` directory inside your wiki.
* If the `use-default-templates` setting is `true` for your configuration (which it is by default), it then searches the `default-templates` directory bundled with Markdoc.
If `use-default-templates` is `false` and the templates are not defined in your wiki’s template directory, Markdoc will eventually raise an error.
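For illustration only, that lookup order could be expressed as a Jinja2 loader chain, roughly as in the sketch below. This is not Markdoc's actual code; `config` stands in for the wiki configuration mapping, and the bundled-templates path is hypothetical.
:::python
from jinja2 import ChoiceLoader, Environment, FileSystemLoader
# Search the wiki's own template directory first, then optionally fall
# back to the templates bundled with Markdoc.
loaders = [FileSystemLoader('.templates')]
if config['use-default-templates']:
    loaders.append(FileSystemLoader('/path/to/markdoc/default-templates'))
env = Environment(loader=ChoiceLoader(loaders))
document_template = env.get_template('document.html')  # TemplateNotFound if absent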
### Documents
`document.html` is used to convert the partial XHTML for a Markdown document into full, browser-ready XHTML. It receives a context much like the following:
:::python
{
"content": "<h1>...", # The XHTML for the document.
"title": "Some Document", # The extracted title of the document.
"crumbs": [("index", "/"), ("some-document", None)] # Breadcrumbs
}
The `config` variable is also (globally) set to the configuration dictionary for the current wiki.
Take a look inside the `src/markdoc/static/default-templates/markdoc-default/` directory for examples of complete templates.
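For a sense of the shape, a bare-bones `document.html` consuming this context might look like the sketch below (illustrative only; class names are arbitrary, and the bundled default templates are considerably more complete):
:::text
<html>
<head><title>{{ title }}</title></head>
<body>
<ul class="crumbs">
{% for name, href in crumbs %}
<li>{% if href %}<a href="{{ make_relative(href) }}">{{ name }}</a>{% else %}{{ name }}{% endif %}</li>
{% endfor %}
</ul>
{{ content }}
</body>
</html>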
### Listings
`listing.html` is used to generate listings for directories. This will only be used if `generate-listing` is set to either `always` or `sometimes` in the configuration (by default it is `always`).
Listings are a little more complex to do, so they are generated after the complete set of documents has been rendered and synced (along with static media) to the HTML root. This means you get complete listings for all of your directories, including those which came from static media.
The `listing.html` template is passed a context like this:
:::python
{"directory": "somedir",
"crumbs": [("index", "/"),
("somedir", "/somedir/"),
(jinja2.Markup('<span class="list-crumb">list</span>'), None)],
"files": [{"basename": "example.css",
"href": "/example.css",
"humansize": "27B",
"size": 27,
"slug": "example"}],
"pages": [{"basename": "hello.html",
"href": "/subdir/hello",
"humansize": "268B",
"size": 268,
"slug": "hello",
"title": u"Hello again."}],
"sub_directories": [{"basename": "subdir", "href": "/subdir/"}]}
The distinction between *files* and *pages* is useful because you can display links to pages with their full titles; if you’re viewing this now in a browser, just head to [/_list](/_list) to see what I mean. The filename which the list is output to can be configured with the `listing-filename` setting; this defaults to `_list.html` (hence `/_list`). You can also try `/media/_list`, et cetera.
The last crumb is special in that it displays the string `"list"` but with a class of `list-crumb`; in the default templates and media this is displayed in a light grey to indicate that it is a special page.
The semantics of listing generation are determined by the `generate-listing` setting; `always` will always generate a listing (even if it clobbers an existing file called `_list.html`), `sometimes` will only generate a listing when there is no `index.html` file for a directory, and `never` will never generate listings.
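For example, both behaviours can be set in your `markdoc.yaml` (the values shown here are just one possible choice):
:::yaml
generate-listing: sometimes
listing-filename: _list.html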
## Relative Links
For portability, all URLs pointing to files and pages within the current Markdoc wiki should be relative. This allows built wiki HTML to be hosted under a sub-directory and still maintain link integrity.
In practice, this is achieved in two parts:
* A Markdown extension which causes absolute path URLs in links (such as
`/path/to/somefile`) to be converted to relative ones (like `../somefile`).
* A callable passed to every template context which, when called with an
absolute path URL, will convert it to a relative one. This variable is
`make_relative()`, and an example of its use can be seen in this snippet
from the default base template:
:::text
<head>
<!--...snip...-->
{% import "macros/html" as html -%}
{{ html.cssimport(make_relative("/media/css/reset.css")) }}
{{ html.cssimport(make_relative("/media/css/layout.css")) }}
{{ html.cssimport(make_relative("/media/css/typography.css")) }}
{{ html.cssimport(make_relative("/media/css/pygments.css")) }}
</head>
If you override the default templates, make sure to use this callable to
relativize the media URLs and breadcrumb links.
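The path arithmetic behind `make_relative()` is small; the following is a rough sketch of the idea (not Markdoc's actual implementation), relativizing an absolute wiki URL against the current page's URL path:
:::python
import posixpath
def relativize(curr_path, href):
    # Only absolute wiki paths (starting with '/') are rewritten; external
    # URLs and already-relative links pass through untouched.
    if not href.startswith('/'):
        return href
    return posixpath.relpath(href, start=posixpath.dirname(curr_path))
# For example:
# relativize('/internals/rendering', '/media/css/reset.css')
# => '../media/css/reset.css'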
# Development
Markdoc is actively developed via [GitHub][gh-markdoc].
[gh-markdoc]: http://github.com/zacharyvoase/markdoc
## Working with the Repository
You’ll need to install [Git][git] first; check your OS’s package manager for a
`git` or `git-core` package.
[git]: http://git-scm.com/
You can check out a copy of the repository by cloning it:
:::bash
$ git clone git://github.com/zacharyvoase/markdoc.git
$ cd markdoc/
### Repo Structure
There are several files and directories in the root of the repo:
:::text
markdoc/
|-- doc/
|-- src/
|-- test/
|-- UNLICENSE
|-- MANIFEST.in
|-- README
|-- REQUIREMENTS
|-- distribute_setup.py
|-- nose.cfg
`-- setup.py
`doc/`
: A Markdoc wiki containing Markdoc’s own documentation. How very meta.
`src/`
: The home of all of Markdoc’s Python code.
`test/`, `nose.cfg`
: Markdoc’s tests (Python + Doctests) and nose configuration. [Nose][] is a
Python utility to automate and simplify running complex test suites.
`UNLICENSE`
: The text of the unlicense which designates Markdoc as public domain software.
`MANIFEST.in`, `setup.py`, `distribute_setup.py`
: The necessary Python packaging machinery, so you can run
`easy_install Markdoc`.
`README`
: Doesn’t need an explanation.
`REQUIREMENTS`
: A text file listing all of Markdoc’s requirements, suitable for use with
`pip install -r REQUIREMENTS`. [pip][] is a next-generation `easy_install`
replacement for Python.
[pip]: http://pip.openplans.org/
[nose]: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/
### Bug Reporting and Feature Requests
All bugs and feature requests are handled on the [GitHub issues page](http://github.com/zacharyvoase/markdoc/issues).
### Contributing
If you’re interested in implementing a feature or extension for Markdoc, just fork the GitHub repository, work on the feature, commit your changes, then send me a pull request. If I like what you’ve done, I’ll pull your changes into the official Markdoc release and give you author credit.
Remember that you must be willing to release your changes to the public domain. If you are submitting a non-trivial patch, take a look at [unlicense.org][unlicensing contributions] for detailed instructions; for now, you just need to agree to the following statement:
[unlicensing contributions]: http://unlicense.org/#unlicensing-contributions
:::text
I dedicate any and all copyright interest in this software to the
public domain. I make this dedication for the benefit of the public at
large and to the detriment of my heirs and successors. I intend this
dedication to be an overt act of relinquishment in perpetuity of all
present and future rights to this software under copyright law.
# Markdown Syntax Guide
Adapted from <http://daringfireball.net/projects/markdown/syntax>.
[TOC]
* * *
## Overview
### Philosophy
Markdown is intended to be as easy-to-read and easy-to-write as is feasible.
Readability, however, is emphasized above all else. A Markdown-formatted
document should be publishable as-is, as plain text, without looking like it's
been marked up with tags or formatting instructions. While Markdown's syntax has
been influenced by several existing text-to-HTML filters -- including
[Setext] [1], [atx] [2], [Textile] [3], [reStructuredText] [4], [Grutatext] [5],
and [EtText] [6] -- the single biggest source of inspiration for Markdown's
syntax is the format of plain text email.
[1]: http://docutils.sourceforge.net/mirror/setext.html
[2]: http://www.aaronsw.com/2002/atx/
[3]: http://textism.com/tools/textile/
[4]: http://docutils.sourceforge.net/rst.html
[5]: http://www.triptico.com/software/grutatxt.html
[6]: http://ettext.taint.org/doc/
To this end, Markdown's syntax is composed entirely of punctuation characters,
which have been carefully chosen so as to look like what they mean. E.g.,
asterisks around a word actually look like \*emphasis\*.
Markdown lists look like, well, lists. Even blockquotes look like quoted
passages of text, assuming you've ever used email.
### Inline HTML
Markdown's syntax is intended for one purpose: to be used as a format for
*writing* for the web.
Markdown is not a replacement for HTML, or even close to it. Its syntax is very
small, corresponding only to a very small subset of HTML tags. The idea is *not*
to create a syntax that makes it easier to insert HTML tags. In my opinion, HTML
tags are already easy to insert. The idea for Markdown is to make it easy to
read, write, and edit prose. HTML is a *publishing* format; Markdown is a
*writing* format. Thus, Markdown's formatting syntax only addresses issues that
can be conveyed in plain text.
For any markup that is not covered by Markdown's syntax, you simply use HTML
itself. There's no need to preface it or delimit it to indicate that you're
switching from Markdown to HTML; you just use the tags.
The only restrictions are that block-level HTML elements -- e.g. `<div>`,
`<table>`, `<pre>`, `<p>`, etc. -- must be separated from surrounding content by
blank lines, and the start and end tags of the block should not be indented with
tabs or spaces. Markdown is smart enough not to add extra (unwanted) `<p>` tags
around HTML block-level tags.
For example, to add an HTML table to a Markdown article:
:::text
This is a regular paragraph.
<table>
<tr>
<td>Foo</td>
</tr>
</table>
This is another regular paragraph.
Note that Markdown formatting syntax is not processed within block-level HTML
tags. E.g., you can't use Markdown-style `*emphasis*` inside an HTML block.
Span-level HTML tags -- e.g. `<span>`, `<cite>`, or `<del>` -- can be used
anywhere in a Markdown paragraph, list item, or header. If you want, you can
even use HTML tags instead of Markdown formatting; e.g. if you'd prefer to use
HTML `<a>` or `<img>` tags instead of Markdown's link or image syntax, go right
ahead.
Unlike block-level HTML tags, Markdown syntax *is* processed within span-level
tags.
### Automatic Escaping for Special Characters
In HTML, there are two characters that demand special treatment: `<` and `&`.
Left angle brackets are used to start tags; ampersands are used to denote HTML
entities. If you want to use them as literal characters, you must escape them as
entities, e.g. `&lt;`, and `&amp;`.
Ampersands in particular are bedeviling for web writers. If you want to write
about 'AT&T', you need to write '`AT&amp;T`'. You even need to escape ampersands
within URLs. Thus, if you want to link to:
:::text
http://images.google.com/images?num=30&q=larry+bird
you need to encode the URL as:
:::text
http://images.google.com/images?num=30&amp;q=larry+bird
in your anchor tag `href` attribute. Needless to say, this is easy to forget,
and is probably the single most common source of HTML validation errors in
otherwise well-marked-up web sites.
Markdown allows you to use these characters naturally, taking care of all the
necessary escaping for you. If you use an ampersand as part of an HTML entity,
it remains unchanged; otherwise it will be translated into `&amp;`.
So, if you want to include a copyright symbol in your article, you can write:
:::text
&copy;
and Markdown will leave it alone. But if you write:
:::text
AT&T
Markdown will translate it to:
:::text
AT&amp;T
Similarly, because Markdown supports [inline HTML](#inline-html), if you use
angle brackets as delimiters for HTML tags, Markdown will treat them as such.
But if you write:
:::text
4 < 5
Markdown will translate it to:
:::text
4 &lt; 5
However, inside Markdown code spans and blocks, angle brackets and ampersands
are *always* encoded automatically. This makes it easy to use Markdown to write
about HTML code. (As opposed to raw HTML, which is a terrible format for writing
about HTML syntax, because every single `<` and `&` in your example code needs
to be escaped.)
* * *
## Block Elements
### Paragraphs and Line Breaks
A paragraph is simply one or more consecutive lines of text, separated by one or
more blank lines. (A blank line is any line that looks like a blank line -- a
line containing nothing but spaces or tabs is considered blank.) Normal
paragraphs should not be indented with spaces or tabs.
The implication of the "one or more consecutive lines of text" rule is that
Markdown supports "hard-wrapped" text paragraphs. This differs significantly
from most other text-to-HTML formatters (including Movable Type's "Convert Line
Breaks" option) which translate every line break character in a paragraph into a
`<br />` tag.
When you *do* want to insert a `<br />` break tag using Markdown, you end a line
with two or more spaces, then type return.
Yes, this takes a tad more effort to create a `<br />`, but a simplistic "every
line break is a `<br />`" rule wouldn't work for Markdown. Markdown's
email-style [blockquoting][bq] and multi-paragraph [list items][l] work best --
and look better -- when you format them with hard breaks.
[bq]: #blockquote
[l]: #list
### Headers
Markdown supports two styles of headers, [Setext] [1] and [atx] [2].
Setext-style headers are "underlined" using equal signs (for first-level
headers) and dashes (for second-level headers). For example:
:::text
This is an H1
=============
This is an H2
-------------
Any number of underlining `=`'s or `-`'s will work.
Atx-style headers use 1-6 hash characters at the start of the line,
corresponding to header levels 1-6. For example:
:::text
# This is an H1
## This is an H2
###### This is an H6
Optionally, you may "close" atx-style headers. This is purely cosmetic -- you
can use this if you think it looks better. The closing hashes don't even need to
match the number of hashes used to open the header. (The number of opening
hashes determines the header level.) :
:::text
# This is an H1 #
## This is an H2 ##
### This is an H3 ######
### Blockquotes
Markdown uses email-style `>` characters for blockquoting. If you're familiar
with quoting passages of text in an email message, then you know how to create a
blockquote in Markdown. It looks best if you hard wrap the text and put a `>`
before every line:
:::text
> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,
> consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus.
> Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.
>
> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse
> id sem consectetuer libero luctus adipiscing.
Markdown allows you to be lazy and only put the `>` before the first line of a
hard-wrapped paragraph:
:::text
> This is a blockquote with two paragraphs. Lorem ipsum dolor sit amet,
consectetuer adipiscing elit. Aliquam hendrerit mi posuere lectus.
Vestibulum enim wisi, viverra nec, fringilla in, laoreet vitae, risus.
> Donec sit amet nisl. Aliquam semper ipsum sit amet velit. Suspendisse
id sem consectetuer libero luctus adipiscing.
Blockquotes can be nested (i.e. a blockquote-in-a-blockquote) by adding
additional levels of `>`:
:::text
> This is the first level of quoting.
>
> > This is nested blockquote.
>
> Back to the first level.
Blockquotes can contain other Markdown elements, including headers, lists, and
code blocks:
:::text
> ## This is a header.
>
> 1. This is the first list item.
> 2. This is the second list item.
>
> Here's some example code:
>
> return shell_exec("echo $input | $markdown_script");
Any decent text editor should make email-style quoting easy. For example, with
BBEdit, you can make a selection and choose Increase Quote Level from the Text
menu.
### Lists
Markdown supports ordered (numbered) and unordered (bulleted) lists.
Unordered lists use asterisks, pluses, and hyphens -- interchangeably -- as list
markers:
:::text
* Red
* Green
* Blue
is equivalent to:
:::text
+ Red
+ Green
+ Blue
and:
:::text
- Red
- Green
- Blue
Ordered lists use numbers followed by periods:
:::text
1. Bird
2. McHale
3. Parish
It's important to note that the actual numbers you use to mark the list have no
effect on the HTML output Markdown produces. The HTML Markdown produces from the
above list is:
:::text
<ol>
<li>Bird</li>
<li>McHale</li>
<li>Parish</li>
</ol>
If you instead wrote the list in Markdown like this:
:::text
1. Bird
1. McHale
1. Parish
or even:
:::text
3. Bird
1. McHale
8. Parish
you'd get the exact same HTML output. The point is, if you want to, you can use
ordinal numbers in your ordered Markdown lists, so that the numbers in your
source match the numbers in your published HTML. But if you want to be lazy, you
don't have to.
If you do use lazy list numbering, however, you should still start the list with
the number 1. At some point in the future, Markdown may support starting ordered
lists at an arbitrary number.
List markers typically start at the left margin, but may be indented by up to
three spaces. List markers must be followed by one or more spaces or a tab.
To make lists look nice, you can wrap items with hanging indents:
:::text
* Lorem ipsum dolor sit amet, consectetuer adipiscing elit.
Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi,
viverra nec, fringilla in, laoreet vitae, risus.
* Donec sit amet nisl. Aliquam semper ipsum sit amet velit.
Suspendisse id sem consectetuer libero luctus adipiscing.
But if you want to be lazy, you don't have to:
:::text
* Lorem ipsum dolor sit amet, consectetuer adipiscing elit.
Aliquam hendrerit mi posuere lectus. Vestibulum enim wisi,
viverra nec, fringilla in, laoreet vitae, risus.
* Donec sit amet nisl. Aliquam semper ipsum sit amet velit.
Suspendisse id sem consectetuer libero luctus adipiscing.
If list items are separated by blank lines, Markdown will wrap the items in
`<p>` tags in the HTML output. For example, this input:
:::text
* Bird
* Magic
will turn into:
:::html
<ul>
<li>Bird</li>
<li>Magic</li>
</ul>
But this:
:::text
* Bird
* Magic
will turn into:
:::html
<ul>
<li><p>Bird</p></li>
<li><p>Magic</p></li>
</ul>
List items may consist of multiple paragraphs. Each subsequent paragraph in a
list item must be indented by either 4 spaces or one tab:
:::text
1. This is a list item with two paragraphs. Lorem ipsum dolor
sit amet, consectetuer adipiscing elit. Aliquam hendrerit
mi posuere lectus.
Vestibulum enim wisi, viverra nec, fringilla in, laoreet
vitae, risus. Donec sit amet nisl. Aliquam semper ipsum
sit amet velit.
2. Suspendisse id sem consectetuer libero luctus adipiscing.
It looks nice if you indent every line of the subsequent paragraphs, but here
again, Markdown will allow you to be lazy:
:::text
* This is a list item with two paragraphs.
This is the second paragraph in the list item. You're
only required to indent the first line. Lorem ipsum dolor
sit amet, consectetuer adipiscing elit.
* Another item in the same list.
To put a blockquote within a list item, the blockquote's `>` delimiters need to
be indented:
:::text
* A list item with a blockquote:
> This is a blockquote
> inside a list item.
To put a code block within a list item, the code block needs to be indented
*twice* -- 8 spaces or two tabs:
:::text
* A list item with a code block:
<code goes here>
It's worth noting that it's possible to trigger an ordered list by accident, by
writing something like this:
:::text
1986. What a great season.
In other words, a *number-period-space* sequence at the beginning of a line. To
avoid this, you can backslash-escape the period:
:::text
1986\. What a great season.
### Code Blocks
Pre-formatted code blocks are used for writing about programming or markup
source code. Rather than forming normal paragraphs, the lines of a code block
are interpreted literally. Markdown wraps a code block in both `<pre>` and
`<code>` tags.
To produce a code block in Markdown, simply indent every line of the block by at
least 4 spaces or 1 tab. For example, given this input:
:::text
This is a normal paragraph:
This is a code block.
Markdown will generate:
:::html
<p>This is a normal paragraph:</p>
<pre><code>This is a code block.
</code></pre>
One level of indentation -- 4 spaces or 1 tab -- is removed from each line of
the code block. For example, this:
:::text
Here is an example of AppleScript:
tell application "Foo"
beep
end tell
will turn into:
:::html
<p>Here is an example of AppleScript:</p>
<pre><code>tell application "Foo"
beep
end tell
</code></pre>
A code block continues until it reaches a line that is not indented (or the end
of the article).
Within a code block, ampersands (`&`) and angle brackets (`<` and `>`) are
automatically converted into HTML entities. This makes it very easy to include
example HTML source code using Markdown -- just paste it and indent it, and
Markdown will handle the hassle of encoding the ampersands and angle brackets.
For example, this:
:::text
<div class="footer">
&copy; 2004 Foo Corporation
</div>
will turn into:
:::html
<pre><code>&lt;div class="footer"&gt;
&amp;copy; 2004 Foo Corporation
&lt;/div&gt;
</code></pre>
Regular Markdown syntax is not processed within code blocks. E.g., asterisks are
just literal asterisks within a code block. This means it's also easy to use
Markdown to write about Markdown's own syntax.
### Horizontal Rules
You can produce a horizontal rule tag (`<hr />`) by placing three or more
hyphens, asterisks, or underscores on a line by themselves. If you wish, you may
use spaces between the hyphens or asterisks. Each of the following lines will
produce a horizontal rule:
:::text
* * *
***
*****
- - -
---------------------------------------
* * *
## Span Elements
### Links
Markdown supports two styles of links: *inline* and *reference*.
In both styles, the link text is delimited by [square brackets].
To create an inline link, use a set of regular parentheses immediately after the
link text's closing square bracket. Inside the parentheses, put the URL where
you want the link to point, along with an *optional* title for the link,
surrounded in quotes. For example:
:::text
This is [an example](http://example.com/ "Title") inline link.
[This link](http://example.net/) has no title attribute.
Will produce:
:::html
<p>This is <a href="http://example.com/" title="Title">
an example</a> inline link.</p>
<p><a href="http://example.net/">This link</a> has no
title attribute.</p>
If you're referring to a local resource on the same server, you can use relative
paths:
:::text
See my [About](/about/) page for details.
Reference-style links use a second set of square brackets, inside which you
place a label of your choosing to identify the link:
:::text
This is [an example][id] reference-style link.
You can optionally use a space to separate the sets of brackets:
:::text
This is [an example] [id] reference-style link.
Then, anywhere in the document, you define your link label like this, on a line
by itself:
:::text
[id]: http://example.com/ "Optional Title Here"
That is:
* Square brackets containing the link identifier (optionally indented from the
left margin using up to three spaces);
* followed by a colon;
* followed by one or more spaces (or tabs);
* followed by the URL for the link;
* optionally followed by a title attribute for the link, enclosed in double or
single quotes, or enclosed in parentheses.
The following three link definitions are equivalent:
:::text
[foo]: http://example.com/ "Optional Title Here"
[foo]: http://example.com/ 'Optional Title Here'
[foo]: http://example.com/ (Optional Title Here)
The link URL may, optionally, be surrounded by angle brackets:
:::text
[id]: <http://example.com/> "Optional Title Here"
You can put the title attribute on the next line and use extra spaces or tabs
for padding, which tends to look better with longer URLs:
:::text
[id]: http://example.com/longish/path/to/resource/here
"Optional Title Here"
Link definitions are only used for creating links during Markdown processing,
and are stripped from your document in the HTML output.
Link definition names may consist of letters, numbers, spaces, and punctuation
-- but they are *not* case sensitive. E.g. these two links:
:::text
[link text][a]
[link text][A]
are equivalent.
The *implicit link name* shortcut allows you to omit the name of the link, in
which case the link text itself is used as the name. Just use an empty set of
square brackets -- e.g., to link the word "Google" to the google.com web site,
you could simply write:
:::text
[Google][]
And then define the link:
:::text
[Google]: http://google.com/
Because link names may contain spaces, this shortcut even works for multiple
words in the link text:
:::text
Visit [Daring Fireball][] for more information.
And then define the link:
:::text
[Daring Fireball]: http://daringfireball.net/
Link definitions can be placed anywhere in your Markdown document. I tend to put
them immediately after each paragraph in which they're used, but if you want,
you can put them all at the end of your document, sort of like footnotes.
Here's an example of reference links in action:
:::text
I get 10 times more traffic from [Google] [1] than from
[Yahoo] [2] or [MSN] [3].
[1]: http://google.com/ "Google"
[2]: http://search.yahoo.com/ "Yahoo Search"
[3]: http://search.msn.com/ "MSN Search"
Using the implicit link name shortcut, you could instead write:
:::text
I get 10 times more traffic from [Google][] than from
[Yahoo][] or [MSN][].
[google]: http://google.com/ "Google"
[yahoo]: http://search.yahoo.com/ "Yahoo Search"
[msn]: http://search.msn.com/ "MSN Search"
Both of the above examples will produce the following HTML output:
:::html
<p>I get 10 times more traffic from <a href="http://google.com/"
title="Google">Google</a> than from
<a href="http://search.yahoo.com/" title="Yahoo Search">Yahoo</a>
or <a href="http://search.msn.com/" title="MSN Search">MSN</a>.</p>
For comparison, here is the same paragraph written using Markdown's inline link
style:
:::text
I get 10 times more traffic from [Google](http://google.com/ "Google")
than from [Yahoo](http://search.yahoo.com/ "Yahoo Search") or
[MSN](http://search.msn.com/ "MSN Search").
The point of reference-style links is not that they're easier to write. The
point is that with reference-style links, your document source is vastly more
readable. Compare the above examples: using reference-style links, the paragraph
itself is only 81 characters long; with inline-style links, it's 176 characters;
and as raw HTML, it's 234 characters. In the raw HTML, there's more markup than
there is text.
With Markdown's reference-style links, a source document much more closely
resembles the final output, as rendered in a browser. By allowing you to move
the markup-related metadata out of the paragraph, you can add links without
interrupting the narrative flow of your prose.
### Emphasis
Markdown treats asterisks (`*`) and underscores (`_`) as indicators of emphasis.
Text wrapped with one `*` or `_` will be wrapped with an HTML `<em>` tag; double
`*`'s or `_`'s will be wrapped with an HTML `<strong>` tag. E.g., this input:
:::text
*single asterisks*
_single underscores_
**double asterisks**
__double underscores__
will produce:
:::html
<em>single asterisks</em>
<em>single underscores</em>
<strong>double asterisks</strong>
<strong>double underscores</strong>
You can use whichever style you prefer; the lone restriction is that the same
character must be used to open and close an emphasis span.
Emphasis can be used in the middle of a word:
:::text
un*frigging*believable
But if you surround an `*` or `_` with spaces, it'll be treated as a literal
asterisk or underscore.
To produce a literal asterisk or underscore at a position where it would
otherwise be used as an emphasis delimiter, you can backslash escape it:
:::text
\*this text is surrounded by literal asterisks\*
### Code
To indicate a span of code, wrap it with backtick quotes (`` ` ``). Unlike a
pre-formatted code block, a code span indicates code within a normal paragraph.
For example:
:::text
Use the `printf()` function.
will produce:
:::html
<p>Use the <code>printf()</code> function.</p>
To include a literal backtick character within a code span, you can use multiple
backticks as the opening and closing delimiters:
:::text
``There is a literal backtick (`) here.``
which will produce this:
:::html
<p><code>There is a literal backtick (`) here.</code></p>
The backtick delimiters surrounding a code span may include spaces -- one after
the opening, one before the closing. This allows you to place literal backtick
characters at the beginning or end of a code span:
:::text
A single backtick in a code span: `` ` ``
A backtick-delimited string in a code span: `` `foo` ``
will produce:
:::html
<p>A single backtick in a code span: <code>`</code></p>
<p>A backtick-delimited string in a code span: <code>`foo`</code></p>
With a code span, ampersands and angle brackets are encoded as HTML entities
automatically, which makes it easy to include example HTML tags. Markdown will
turn this:
:::text
Please don't use any `<blink>` tags.
into:
:::html
    <p>Please don't use any <code>&lt;blink&gt;</code> tags.</p>
You can write this:
:::text
    `&#8212;` is the decimal-encoded equivalent of `&mdash;`.
to produce:
:::html
<p><code>&#8212;</code> is the decimal-encoded
equivalent of <code>&mdash;</code>.</p>
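For illustration, here is a minimal Python sketch -- not the actual Markdown
implementation -- of the kind of escaping a processor performs inside code
spans:

    :::python
    import html

    def render_code_span(text):
        # Escape &, < and > so that literal HTML can be displayed safely.
        return '<code>%s</code>' % html.escape(text, quote=False)

    print(render_code_span('<blink>'))  # <code>&lt;blink&gt;</code>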
### Images
Admittedly, it's fairly difficult to devise a "natural" syntax for placing
images into a plain text document format.
Markdown uses an image syntax that is intended to resemble the syntax for links,
allowing for two styles: *inline* and *reference*.
Inline image syntax looks like this:
:::text
![Alt text](/path/to/img.jpg)
![Alt text](/path/to/img.jpg "Optional title")
That is:
* An exclamation mark: `!`;
* followed by a set of square brackets, containing the `alt` attribute text
for the image;
* followed by a set of parentheses, containing the URL or path to the image,
and an optional `title` attribute enclosed in double or single quotes.
Reference-style image syntax looks like this:
:::text
![Alt text][id]
Where "id" is the name of a defined image reference. Image references are
defined using syntax identical to link references:
:::text
[id]: url/to/image "Optional title attribute"
As of this writing, Markdown has no syntax for specifying the dimensions of an
image; if this is important to you, you can simply use regular HTML `<img>`
tags.
* * *
## Miscellaneous
### Automatic Links
Markdown supports a shortcut style for creating "automatic" links for URLs and
email addresses: simply surround the URL or email address with angle brackets.
What this means is that if you want to show the actual text of a URL or email
address, and also have it be a clickable link, you can do this:
:::text
<http://example.com/>
Markdown will turn this into:
:::html
<a href="http://example.com/">http://example.com/</a>
Automatic links for email addresses work similarly, except that Markdown will
also perform a bit of randomized decimal and hex entity-encoding to help obscure
your address from address-harvesting spambots. For example, Markdown will turn
this:
:::text
<[email protected]>
into something like this:
:::html
<a href="mailto:addre
ss@example.co
m">address@exa
mple.com</a>
which will render in a browser as a clickable link to "[email protected]".
(This sort of entity-encoding trick will indeed fool many, if not most,
address-harvesting bots, but it definitely won't fool all of them. It's better
than nothing, but an address published in this way will probably eventually
start receiving spam.)
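As a rough illustration -- not Markdown's exact algorithm -- the randomized
entity-encoding might look like this in Python:

    :::python
    import random

    def obfuscate(addr):
        # Encode each character as a decimal entity, a hex entity, or
        # leave it as-is, chosen at random.
        out = []
        for ch in addr:
            r = random.random()
            if r < 0.45:
                out.append('&#%d;' % ord(ch))
            elif r < 0.9:
                out.append('&#x%X;' % ord(ch))
            else:
                out.append(ch)
        return ''.join(out)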
### Backslash Escapes
Markdown allows you to use backslash escapes to generate literal characters
which would otherwise have special meaning in Markdown's formatting syntax. For
example, if you wanted to surround a word with literal asterisks (instead of an
HTML `<em>` tag), you can use backslashes before the asterisks, like this:
:::text
\*literal asterisks\*
Markdown provides backslash escapes for the following characters:
:::text
\ backslash
` backtick
* asterisk
_ underscore
{} curly braces
[] square brackets
() parentheses
# hash mark
+ plus sign
- minus sign (hyphen)
. dot
! exclamation mark
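As a minimal sketch (again, not the real implementation), stripping these
escapes can be done with a single regular expression:

    :::python
    import re

    def unescape(text):
        # Replace a backslash followed by any escapable character with
        # the literal character itself.
        return re.sub(r'\\([\\`*_{}\[\]()#+\-.!])', r'\1', text)

    print(unescape(r'\*literal asterisks\*'))  # *literal asterisks*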
# Configuration
All Markdoc wikis are configured via a single `markdoc.yaml` file in the wiki
root. This file is formatted with YAML (YAML Ain't Markup Language); you can
find more information on that at [yaml.org](http://yaml.org/). When running the
command-line interface, Markdoc will search for either a `markdoc.yaml` or a
`.markdoc.yaml` file in the current directory. You can also explicitly specify a
file to use with the `-c`/`--config` command-line option.
## Example
Here we’ll go through an example and show how the various settings affect
Markdoc’s behavior:
#!yaml
# Metadata
wiki-name: "My Wiki"
google-analytics: UA-XXXXXX-X
# Directories
hide-prefix: "."
wiki-dir: "wiki"
static-dir: "static"
html-dir: ".html"
template-dir: ".templates"
temp-dir: ".tmp"
cvs-exclude: true
# Building
document-extensions: [.md, .mdown, .markdown, .wiki, .text]
generate-listing: always
listing-filename: "_list.html"
use-default-static: true
use-default-templates: true
# Rendering
markdown:
safe-mode: false
output-format: xhtml1
extensions: [codehilite, def_list]
extension-configs:
codehilite:
force_linenos: true
# Serving
server:
bind: '127.0.0.1'
port: 8010
num-threads: 10
name: 'server.example.com'
request-queue-size: 5
timeout: 10
Remember that Markdoc uses sensible defaults for *all* of these options, so it’s
perfectly OK to have an empty `markdoc.yaml` file. You still need one, though.
* [Metadata](#metadata)
* [Directories](#directories)
* [Building](#building)
* [Rendering](#rendering)
* [Serving](#serving)
### Metadata
This is information about your wiki itself. It is currently only used when
rendering the default set of templates, but custom templates may also use these
parameters.
`wiki-name` (no default)
: Specify the human-friendly name of your wiki. If defined, this will appear
in the title and footer in the default Markdoc templates.
`google-analytics` (no default)
: Specify a [Google Analytics][] tracking key for your Markdoc site. If given,
the GA tracking code will be included in every HTML document. Note that this
should be the full key, in the format `UA-XXXXXX-X`.
[google analytics]: http://google.com/analytics/
### Directories
These settings affect where Markdoc looks for some pieces of data, and where it
puts others. You can get more information on the roles of various directories in
the [layout documentation](/layout). Note that all `*-dir` parameters are
resolved relative to the wiki root, and that `'.'` (i.e. the wiki root itself)
is an acceptable value.
`hide-prefix` (default `.`)
: This determines how Markdoc finds and writes to hidden directories like
`.tmp`, `.templates` and `.html`. You may wish to set this to `'_'` if your
VCS or operating system doesn’t play nicely with period-prefixed filenames.
If you specify `html-dir`, `temp-dir` and `template-dir`, this setting won’t
have any effect.
`wiki-dir` (default `"wiki"`)
: This tells Markdoc where to find pages that should be rendered with Markdown
and output as HTML. Only files in this directory will be rendered.
`static-dir` (default `"static"`)
: Any files in the static directory are synced over to the HTML root as-is
when building. This tells Markdoc where to find all the static media for
your wiki.
`html-dir` (default `hide-prefix + "html"`)
: This is where HTML and static media are eventually output during the
building process. It is also the document root for the Markdoc server. The
default value is auto-generated using the `hide-prefix` setting.
`template-dir` (default `hide-prefix + "templates"`)
: Markdoc will look in this directory first when searching for the Jinja2
templates needed to produce HTML.
`temp-dir` (default `hide-prefix + "tmp"`)
: This directory is used as a temporary destination when building HTML.
`cvs-exclude` (default `true`)
: If this is `true`, Markdoc will pass the `--cvs-exclude` option to `rsync`
when syncing static media and rendered HTML files. This causes `rsync` to
skip some common backup/hidden files (e.g. `.git/`, `.svn/`, `*~` and `#*`).
The full semantics of this option are specified in the
[`rsync` documentation][rsync-docs].
[rsync-docs]: http://www.samba.org/ftp/rsync/rsync.html
### Building
These settings affect Markdoc’s behavior during the build process.
`document-extensions` (default `[.md, .mdown, .markdown, .wiki, .text]`)
: Markdoc will only render files from the document root which have one of
these extensions. If one of the extensions is an empty string (`''`), then
all files (including those without an extension) will be considered pages.
Setting this parameter to the empty list (`[]`) will behave as if it is
actually `['']`.
`generate-listing` (default `always`)
: This affects how listings are generated for directories in your Markdoc wiki
(including the top level). Set this to either `always`, `sometimes` or
`never`. The semantics are as follows:
* `never` never generates any listings.
* `sometimes` only generates a listing when there is no `index` or
`index.html` file in a directory. This listing is saved as both
`index.html` and the value of the `listing-filename` setting.
* `always` generates a listing for every directory, and saves it under the
value of the `listing-filename` setting, and as `index.html` if an index
file does not already exist.
`listing-filename` (default `_list.html`)
: This specifies the filename that directory listings are saved under; see the
documentation for `generate-listing` just above for more information.
`use-default-static` (default `true`)
: If true, Markdoc’s default set of static media will be synchronized to the
HTML root when building.
`use-default-templates` (default `true`)
: If true, Jinja2 will look in Markdoc’s set of default templates when
rendering documents and listings.
### Rendering
These settings determine how Markdoc converts Markdown text into XHTML. They are
all defined as sub-parameters inside the `markdown` dictionary. These parameters
correspond to keyword arguments to the `markdown.Markdown` constructor, although
hyphens (`-`) are all converted to underscores (`_`) in the key strings.
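For illustration, here is a rough sketch -- not Markdoc's actual internals --
of how such a dictionary could be translated into constructor keyword
arguments, assuming the Python-Markdown 2.x constructor signature:

    :::python
    import markdown

    config = {'safe-mode': False, 'output-format': 'xhtml1',
              'extensions': ['codehilite', 'def_list']}
    # Hyphens in the YAML keys become underscores in the kwargs.
    kwargs = dict((k.replace('-', '_'), v) for k, v in config.items())
    md = markdown.Markdown(**kwargs)
    print(md.convert('Hello, *world*!'))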
`extensions` (default `[]`)
: A list of strings specifying extensions to be used by the Markdown renderer.
The [Markdown library for Python][markdown-python-lib] comes with the
following by default:
* `abbr`
* `codehilite`
* `def_list`
* `extra`
* `fenced_code`
* `footnotes`
* `headerid`
* `html_tidy`
* `imagelinks`
* `meta`
* `rss`
* `tables`
* `toc`
* `wikilinks`
[markdown-python-lib]: http://www.freewisdom.org/projects/python-markdown
`extension-configs` (default `{}`)
: These are configuration parameters for the extensions — you’ll need to
consult the official Markdown library documentation for more information.
`safe-mode` (default `false`)
: Disallow raw HTML in Markdown documents. This can be either `false`,
`remove`, `replace` or `escape`.
`output-format` (default `xhtml1`)
: Switch between rendering XHTML or HTML. Can be either `xhtml1`, `xhtml`,
`html4` or `html` (the general ones will always refer to the latest
version). It is strongly suggested that you use XHTML.
### Serving
All of the server configuration parameters exist in the `server` dictionary (as
with `markdown` previously).
`bind` (default `127.0.0.1`)
: Bind to the specified interface. With the default value the server will only
listen on the loopback interface (localhost).
`port` (default `8008`)
: Listen on the specified port.
`num-threads` (default `10`)
: Use this number of threads to handle requests.
`name` (default is autodetected)
: Specify a server name. The default will be automatically detected from the
socket the server binds to.
`request-queue-size` (default `5`)
: Sets the number of queued connections to the server before it will start
dropping TCP connections (the `backlog` argument to `socket.listen()`).
`timeout` (default `10`)
: The socket timeout (in seconds) for accepted TCP connections.
# Layout
Markdoc wikis have the following layout:
:::text
WIKI_ROOT/
|-- .html/
|-- .templates/
|-- .tmp/
|-- static/
|-- wiki/
`-- markdoc.yaml
The `.html/` and `.tmp/` directories should be excluded from any VCS, since they
contain temporary files. Here is a list of the roles of the various files and
sub-directories, in descending order of significance:
`WIKI_ROOT/`
: The *wiki root*, containing all of the files required for the `markdoc`
utility to build and serve the wiki.
`WIKI_ROOT/markdoc.yaml`
: The *wiki configuration*; the main configuration point for your wiki, in a
YAML-formatted file. Consult the [configuration docs](/configuration) for
more information on how to write this.
`WIKI_ROOT/wiki/`
: The *document root*, which contains the actual *text* of your wiki in
Markdown-formatted text files. It is assumed they are UTF-8 encoded. Any
files without a valid extension will be ignored (see the option
`document-extensions` in [configuration](/configuration)).
`WIKI_ROOT/static/`
: The *static directory*: static media files (such as CSS and JavaScript)
should be put here. They will be copied to `.html/` by rsync during the
build operation. Markdoc provides some default CSS for styling.
`WIKI_ROOT/.templates/`
: The *template directory*: it contains the Jinja2 templates for documents,
error pages and directory listings. It comes with some nice defaults, but
you can override these if you wish.
`WIKI_ROOT/.html/`
: The *HTML root* (sometimes abbreviated as ‘htroot’). It holds the compiled
HTML and static files; the built-in Markdoc web server serves content directly from this directory.
`WIKI_ROOT/.tmp/`
: The *temporary directory*: a temporary build destination for rendered
Markdown files. This directory is then rsync’d to the HTML root along with
the static directory; the incremental nature of this operation means the
Markdoc web server can keep running in one process whilst another runs
`markdoc build`.
Note that all of the default locations for these directories can be overridden
in the `markdoc.yaml` file. For example, you may wish to use `WIKI_ROOT/pages/`
instead of `WIKI_ROOT/wiki/`, or `WIKI_ROOT/.build/` instead of
`WIKI_ROOT/.html/`. Consult the [configuration documentation](/configuration)
for more information.
# Barebones Wikis
Out of the box, Markdoc supports relatively complex wikis with custom templates and static files. However, for many cases, the default media and templates are adequate, so why should a vanilla wiki require nesting like the following:
:::text
some-wiki/
|-- .templates/
|-- static/
|-- wiki/
| |-- files
| |-- go
| `-- here
`-- markdoc.yaml
Fortunately, for very simple cases where you just want to be able to render and serve a collection of Markdown-formatted files, you can do so. Begin by just creating and entering an empty directory:
:::bash
$ mkdir mywiki/
$ cd mywiki/
Now create a file called `.markdoc.yaml` containing the following YAML data:
:::yaml
wiki-name: My Wiki # Set this to whatever you want
wiki-dir: "."
static-dir: ".static"
You can add some more configuration parameters if you like, but these are the basic ones you’ll need. So now your directory structure will look like this:
:::text
mywiki/
`-- .markdoc.yaml
And you can just start creating pages in the directory.
:::text
mywiki/
|-- .markdoc.yaml
|-- page1.md
|-- page2.md
`-- page3.md
To run the build process, just do the usual:
:::bash
$ markdoc build && markdoc serve
`markdoc` recognizes both `markdoc.yaml` *and* `.markdoc.yaml` implicitly. Because you’ve hidden everything except the actual wiki pages, to most file browsers (including `ls`) the wiki will just look like a directory with a number of text files.
# Syntax Highlighting with Pygments
[Markdown for Python][] supports syntax highlighting in your documents using [Pygments][]. Here’s how to set up and use it from Markdoc.
[markdown for python]: http://www.freewisdom.org/projects/python-markdown
[pygments]: http://pygments.org/
First, install the extension in your `markdoc.yaml` file:
:::yaml
wiki-name: My Wiki
## ...other settings...
markdown:
extensions:
- codehilite # the important bit
Pygments should have been installed as a dependency when you installed Markdoc.
By default, syntax highlighting will be applied to every code block Markdown encounters, and Pygments will guess which lexer to apply to each block. To specify the language of a block explicitly, add a `:::LANG` prefix to it. For example:
:::text
:::python
def hello():
print "Hello, World!"
Will render to:
:::python
def hello():
print "Hello, World!"
To switch syntax highlighting off for a block, use `:::text`.
If you want a block to have line numbers, use `#!LANG` instead. For example:
:::text
#!ruby
class Foo
def bar
@baz
end
end
Will render to:
#!ruby
class Foo
def bar
@baz
end
end
If you add a shebang to a code block, like `#!/bin/bash` or `#!/usr/bin/env python`, the language will be guessed and the shebang will be included in the output text. So, for example, the following:
:::text
#!/bin/bash
echo "Hello, World"
Will render to:
#!/bin/bash
echo "Hello, World"
# Serving Markdoc Builds With Apache
By default, you can simply serve your HTML root with Apache, and it will work. However, to get the same semantics as the built-in web server (the one which runs when you do `markdoc serve`), you will need to add a few options to your `.htaccess` file:
#!text
Options +MultiViews
<FilesMatch "\.html$">
ForceType 'application/xhtml+xml; charset=UTF-8'
</FilesMatch>
The first directive will cause requests for `/directory/filename` to look for `directory/filename.html` in your HTML root, allowing for more natural references between pages in your wiki.
The second part will cause `.html` files to be served as valid UTF-8-encoded XHTML, which Markdoc assumes they are.
# AdelaiDepth
[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/1rDLZBtiUgsFJrrL-xOgTVWxj6PMK9swq?usp=sharing)
AdelaiDepth is an open source toolbox for monocular depth prediction. Relevant work from our group is open-sourced here.
AdelaiDepth contains the following algorithms:
* Boosting Depth: [Code](https://github.com/guangkaixu/BoostingDepth), [Towards 3D Scene Reconstruction from Locally Scale-Aligned
Monocular Video Depth (Boosting Monocular Depth Estimation with Sparse Guided Points)](https://arxiv.org/abs/2202.01470)
* 3D Scene Shape (Best Paper Finalist): [Code](https://github.com/aim-uofa/AdelaiDepth/tree/main/LeReS), [Learning to Recover 3D Scene Shape from a Single Image](https://arxiv.org/abs/2012.09365)
* DiverseDepth: [Code](https://github.com/YvanYin/DiverseDepth), [Virtual Normal: Enforcing Geometric Constraints for Accurate and Robust Depth Prediction](https://arxiv.org/abs/2103.04216), [DiverseDepth: Affine-invariant Depth Prediction Using Diverse Data](https://arxiv.org/abs/2002.00569)
* Virtual Normal: [Code](https://github.com/YvanYin/VNL_Monocular_Depth_Prediction), [Enforcing geometric constraints of virtual normal for depth prediction](https://arxiv.org/abs/1907.12209)
* Depth Estimation Using Deep Convolutional Neural Fields: [Code](https://bitbucket.org/fayao/dcnf-fcsp/src/master/), [Learning Depth from Single Monocular Images Using Deep Convolutional Neural Fields, TPAMI'16, CVPR'15](https://arxiv.org/abs/1502.07411)
## News:
* [May. 31, 2022] Code for local recovery strategy of BoostingDepth is released.
* [May. 31, 2022] Training code and data of LeReS project have been released.
* [Feb. 13, 2022] Training code and data of DiverseDepth project have been released.
* [Jun. 13, 2021] Our "Learning to Recover 3D Scene Shape from a Single Image" work is one of the CVPR'21 Best Paper Finalists.
* [Jun. 6, 2021] We have made the training data of DiverseDepth available.
## Results and Dataset Examples:
1. 3D Scene Shape
You may want to check [this video](http://www.youtube.com/watch?v=UuT5_GK_TWk) which provides a very brief introduction to the work:
<table>
<tr>
<td>RGB</td>
<td>Depth</td>
<td>Point Cloud</td>
</tr>
<tr>
<td><img src="examples/2-rgb.jpg" height=300></td>
<td><img src="examples/2.jpg" height=300></td>
<td><img src="examples/2.gif" height=300></td>
</tr>
</table>
![Depth](./examples/depth.png)
2. DiverseDepth
* Results examples:
![Depth](./examples/diverse_depth.jpg)
* DiverseDepth dataset examples:
![DiverseDepth dataset](./examples/diversedepth_dataset_examples.png)
## BibTeX
```BibTeX
@article{yin2022towards,
title={Towards Accurate Reconstruction of 3D Scene Shape from A Single Monocular Image},
author={Yin, Wei and Zhang, Jianming and Wang, Oliver and Niklaus, Simon and Chen, Simon and Liu, Yifan and Shen, Chunhua},
journal={IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
year={2022}
}
@inproceedings{Yin2019enforcing,
title = {Enforcing geometric constraints of virtual normal for depth prediction},
author = {Yin, Wei and Liu, Yifan and Shen, Chunhua and Yan, Youliang},
booktitle = {The IEEE International Conference on Computer Vision (ICCV)},
year = {2019}
}
@inproceedings{Wei2021CVPR,
title = {Learning to Recover 3D Scene Shape from a Single Image},
author = {Wei Yin and Jianming Zhang and Oliver Wang and Simon Niklaus and Long Mai and Simon Chen and Chunhua Shen},
booktitle = {Proc. IEEE Conf. Comp. Vis. Patt. Recogn. (CVPR)},
year = {2021}
}
@article{yin2021virtual,
title = {Virtual Normal: Enforcing Geometric Constraints for Accurate and Robust Depth Prediction},
author = {Yin, Wei and Liu, Yifan and Shen, Chunhua},
journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence (TPAMI)},
year = {2021}
}
```
### Contact
* Wei Yin <https://yvanyin.net/>
* Chunhua Shen <https://cshen.github.io>
## License
The *3D Scene Shape* code is under a non-commercial license from *Adobe Research*. See the [LICENSE file](LeReS/LICENSE) for details.
Other depth prediction projects are licensed under the 2-clause BSD License for non-commercial use -- see the [LICENSE file](LICENSE) for details. For commercial use, please contact [Chunhua Shen](https://cshen.github.io).
# By default, we put all data under Train/datasets. You can put the data anywhere, but then create softlinks under Train/datasets.
# We provide two ways to download the data: 1) CloudStor; 2) Google Drive.
# 1. Download from CloudStor:
# download part-fore
cd Train/datasets
mkdir DiverseDepth
cd DiverseDepth
wget https://cloudstor.aarnet.edu.au/plus/s/HNfpS4tAz3NePtU/download -O DiverseDepth_d.tar.gz
wget https://cloudstor.aarnet.edu.au/plus/s/n5bOhKk52fXILp9/download -O DiverseDepth_rgb.zip
tar -xvf DiverseDepth_d.tar.gz
unzip DiverseDepth_rgb.zip
# download part_in, collected from taskonomy
cd ..
mkdir taskonomy
cd taskonomy
# (original link) wget https://cloudstor.aarnet.edu.au/plus/s/Q4jqXt2YfqcGZvK/download -O annotations.zip
wget --no-check-certificate "https://drive.google.com/uc?export=download&id=1Nhz0BABZBjjE-ITbJoSyVSuaJkD-iWVv" -O annotations.zip
wget https://cloudstor.aarnet.edu.au/plus/s/EBv6jRp326zMlf6/download -O taskonomy_rgbs.tar.gz
wget https://cloudstor.aarnet.edu.au/plus/s/t334giSOJtC97Uq/download -O taskonomy_ins_planes.tar.gz
wget https://cloudstor.aarnet.edu.au/plus/s/kvLcrVSWfOsERsI/download -O taskonomy_depths.tar.gz
tar -xvf ./*.tar.gz
unzip annotations.zip
# HRWSI
cd ..
mkdir HRWSI
cd HRWSI
wget https://cloudstor.aarnet.edu.au/plus/s/oaWj2Cfvif3WuD0/download -O HRWSI.zip
unzip HRWSI.zip
# Holopix50k
cd ..
mkdir Holopix50k
cd Holopix50k
wget https://cloudstor.aarnet.edu.au/plus/s/LuOsawtGq6cDAKr/download -O Holopix50k.zip
unzip Holopix50k.zip
# The overview of data under Train/datasets are:
# -Train
# |--datasets
# |--DiverseDepth
# |--annotations
# |--depths
# |--rgbs
# |--taskonomy
# |--annotations
# |--depths
# |--rgbs
# |--ins_planes
# |--HRWSI
# |--Holopix50k
matplotlib
opencv-python
plyfile
pyyaml
dill
tensorboardX
# Best Paper Finalist: [Learning to Recover 3D Scene Shape from a Single Image](https://arxiv.org/abs/2012.09365)
This repository contains the source code of the paper:
Wei Yin, Jianming Zhang, Oliver Wang, Simon Niklaus, Long Mai, Simon Chen, Chunhua Shen [Learning to Recover 3D Scene Shape from a Single Image](https://arxiv.org/abs/2012.09365). Published in Proc. IEEE Conf. Computer Vision and Pattern Recognition (CVPR) 2021.
[NEW] Training code has been released!
## Some Results
<table>
<tr>
<td><img src="../examples/1.gif" width=400 height=300></td>
<td><img src="../examples/2.gif" width=400 height=300></td>
<td><img src="../examples/3.gif" width=400 height=300></td>
</tr>
</table>
You may want to check [this video](http://www.youtube.com/watch?v=UuT5_GK_TWk), which provides a very brief introduction to the work.
## Prerequisite
```bash
conda create -n LeReS python=3.7
conda activate LeReS
conda install pytorch==1.6.0 torchvision==0.7.0 cudatoolkit=10.2 -c pytorch
pip install -r requirements.txt
```
If you only want to test the monocular depth estimation from a single image, you can directly go to 'Quick Start' and follow Step 3.
If you want to reconstruct 3D shape from a single image, please install [torchsparse](https://github.com/mit-han-lab/torchsparse) packages as follows. If you have any issues with torchsparse, please refer to [torchsparse](https://github.com/mit-han-lab/torchsparse).
```bash
#torchsparse currently only supports PyTorch 1.6.0 + CUDA 10.2 + CUDNN 7.6.2.
sudo apt-get install libsparsehash-dev
pip install --upgrade git+https://github.com/mit-han-lab/[email protected]
```
## Quick Start (Inference)
1. Download the model weights
* [ResNet50 backbone](https://cloudstor.aarnet.edu.au/plus/s/VVQayrMKPlpVkw9)
* [ResNeXt101 backbone](https://cloudstor.aarnet.edu.au/plus/s/lTIJF4vrvHCAI31)
2. Prepare data.
* Move the downloaded weights to `LeReS/Minist_Test/`
* Put the testing RGB images in `LeReS/Minist_Test/test_images/`. Predicted depths and reconstructed point clouds are saved under `LeReS/Minist_Test/test_images/outputs`
3. Test monocular depth prediction. Note that the predicted depths are affine-invariant.
```bash
export PYTHONPATH="<PATH to Minist_Test>"
# run the ResNet-50
python ./tools/test_depth.py --load_ckpt res50.pth --backbone resnet50
# run the ResNeXt-101
python ./tools/test_depth.py --load_ckpt res101.pth --backbone resnext101
```
4. Test 3D reconstruction from a single image.
```bash
export PYTHONPATH="<PATH to Minist_Test>"
# run the ResNet-50
python ./tools/test_shape.py --load_ckpt res50.pth --backbone resnet50
# run the ResNeXt-101
python ./tools/test_shape.py --load_ckpt res101.pth --backbone resnext101
```
## Training
1. (Optional) Run a demo training to verify the Python environment.
```
cd Train/scripts
sh train_demo.sh
```
2. Download the training data. Run 'download_data.sh' to obtain the taskonomy, DiverseDepth, HRWSI and Holopix50k datasets. All data is organized under `Train/datasets`, with the following structure.
```
|--Train
| |--data
| |--lib
| |--scripts
| |--tools
| |--datasets
| | |--DiverseDepth
| | | |--annotations
| | | |--depths
| | | |--rgbs
| | |--taskonomy
| | | |--annotations
| | | |--depths
| | | |--rgbs
| | | |--ins_planes
| | |--HRWSI
| | |--Holopix50k
```
3. Train the network. The default setting uses 4 GPUs. If you want to use more GPUs, set `CUDA_VISIBLE_DEVICES` accordingly, e.g. `export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7`. The `--batchsize` option is the number of samples on a single GPU.
```
cd Train/scripts
sh train.sh
```
4. Test the network on a benchmark. We provide sample code for testing on NYU. Please download the NYU test data [test.mat](https://cloudstor.aarnet.edu.au/plus/s/G2ckXCJX3pvrzRU) for evaluation and move it to `Train/datasets/test.mat`. To test on other benchmarks, you can follow this sample code.
```
cd Train/scripts
sh test.sh
```
## BibTeX
```BibTeX
@article{yin2022towards,
title={Towards Accurate Reconstruction of 3D Scene Shape from A Single Monocular Image},
author={Yin, Wei and Zhang, Jianming and Wang, Oliver and Niklaus, Simon and Chen, Simon and Liu, Yifan and Shen, Chunhua},
journal={TPAMI},
year={2022}
}
@inproceedings{Wei2021CVPR,
title = {Learning to Recover 3D Scene Shape from a Single Image},
author = {Wei Yin and Jianming Zhang and Oliver Wang and Simon Niklaus and Long Mai and Simon Chen and Chunhua Shen},
booktitle = {Proc. IEEE Conf. Comp. Vis. Patt. Recogn. (CVPR)},
year = {2021}
}
```
## License
This project is under a non-commercial license from Adobe Research. See the [LICENSE file](LICENSE) for details.
import importlib
import torch
import os
from collections import OrderedDict
def get_func(func_name):
"""Helper to return a function object by name. func_name must identify a
function in this module or the path to a function relative to the base
'modeling' module.
"""
if func_name == '':
return None
try:
parts = func_name.split('.')
# Refers to a function in this module
if len(parts) == 1:
return globals()[parts[0]]
# Otherwise, assume we're referencing a module under modeling
module_name = 'lib.' + '.'.join(parts[:-1])
module = importlib.import_module(module_name)
return getattr(module, parts[-1])
except Exception:
        print('Failed to find function: %s' % func_name)
raise
def load_ckpt(args, depth_model, shift_model, focal_model):
"""
Load checkpoint.
"""
if os.path.isfile(args.load_ckpt):
print("loading checkpoint %s" % args.load_ckpt)
checkpoint = torch.load(args.load_ckpt)
if shift_model is not None:
shift_model.load_state_dict(strip_prefix_if_present(checkpoint['shift_model'], 'module.'),
strict=True)
if focal_model is not None:
focal_model.load_state_dict(strip_prefix_if_present(checkpoint['focal_model'], 'module.'),
strict=True)
depth_model.load_state_dict(strip_prefix_if_present(checkpoint['depth_model'], "module."),
strict=True)
del checkpoint
torch.cuda.empty_cache()
def strip_prefix_if_present(state_dict, prefix):
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
    return stripped_state_dict
import torch.nn as nn
import torchsparse.nn as spnn
from torchsparse.point_tensor import PointTensor
from lib.spvcnn_utils import *
__all__ = ['SPVCNN_CLASSIFICATION']
class BasicConvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride),
spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
out = self.net(x)
return out
class BasicDeconvolutionBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
stride=stride,
transpose=True),
spnn.BatchNorm(outc),
spnn.ReLU(True))
def forward(self, x):
return self.net(x)
class ResidualBlock(nn.Module):
def __init__(self, inc, outc, ks=3, stride=1, dilation=1):
super().__init__()
self.net = nn.Sequential(
spnn.Conv3d(inc,
outc,
kernel_size=ks,
dilation=dilation,
stride=stride), spnn.BatchNorm(outc),
spnn.ReLU(True),
spnn.Conv3d(outc,
outc,
kernel_size=ks,
dilation=dilation,
stride=1),
spnn.BatchNorm(outc)
)
self.downsample = nn.Sequential() if (inc == outc and stride == 1) else \
nn.Sequential(
spnn.Conv3d(inc, outc, kernel_size=1, dilation=1, stride=stride),
spnn.BatchNorm(outc)
)
self.relu = spnn.ReLU(True)
def forward(self, x):
out = self.relu(self.net(x) + self.downsample(x))
return out
class SPVCNN_CLASSIFICATION(nn.Module):
def __init__(self, **kwargs):
super().__init__()
cr = kwargs.get('cr', 1.0)
cs = [32, 32, 64, 128, 256, 256, 128, 96, 96]
cs = [int(cr * x) for x in cs]
if 'pres' in kwargs and 'vres' in kwargs:
self.pres = kwargs['pres']
self.vres = kwargs['vres']
self.stem = nn.Sequential(
spnn.Conv3d(kwargs['input_channel'], cs[0], kernel_size=3, stride=1),
spnn.BatchNorm(cs[0]),
spnn.ReLU(True),
spnn.Conv3d(cs[0], cs[0], kernel_size=3, stride=1),
spnn.BatchNorm(cs[0]),
spnn.ReLU(True))
self.stage1 = nn.Sequential(
BasicConvolutionBlock(cs[0], cs[0], ks=2, stride=2, dilation=1),
ResidualBlock(cs[0], cs[1], ks=3, stride=1, dilation=1),
ResidualBlock(cs[1], cs[1], ks=3, stride=1, dilation=1),
)
self.stage2 = nn.Sequential(
BasicConvolutionBlock(cs[1], cs[1], ks=2, stride=2, dilation=1),
ResidualBlock(cs[1], cs[2], ks=3, stride=1, dilation=1),
ResidualBlock(cs[2], cs[2], ks=3, stride=1, dilation=1),
)
self.stage3 = nn.Sequential(
BasicConvolutionBlock(cs[2], cs[2], ks=2, stride=2, dilation=1),
ResidualBlock(cs[2], cs[3], ks=3, stride=1, dilation=1),
ResidualBlock(cs[3], cs[3], ks=3, stride=1, dilation=1),
)
self.stage4 = nn.Sequential(
BasicConvolutionBlock(cs[3], cs[3], ks=2, stride=2, dilation=1),
ResidualBlock(cs[3], cs[4], ks=3, stride=1, dilation=1),
ResidualBlock(cs[4], cs[4], ks=3, stride=1, dilation=1),
)
self.avg_pool = spnn.GlobalAveragePooling()
self.classifier = nn.Sequential(nn.Linear(cs[4], kwargs['num_classes']))
self.point_transforms = nn.ModuleList([
nn.Sequential(
nn.Linear(cs[0], cs[4]),
nn.BatchNorm1d(cs[4]),
nn.ReLU(True),
),
])
self.weight_initialization()
self.dropout = nn.Dropout(0.3, True)
def weight_initialization(self):
for m in self.modules():
if isinstance(m, nn.BatchNorm1d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def forward(self, x):
# x: SparseTensor z: PointTensor
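        # Overall flow (descriptive sketch): voxelize the raw points, run
        # four sparse convolution stages at decreasing resolution, map the
        # coarsest voxels back to points, fuse them with a point-wise
        # transform, then globally pool and classify.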
z = PointTensor(x.F, x.C.float())
x0 = initial_voxelize(z, self.pres, self.vres)
x0 = self.stem(x0)
z0 = voxel_to_point(x0, z, nearest=False)
z0.F = z0.F
x1 = point_to_voxel(x0, z0)
x1 = self.stage1(x1)
x2 = self.stage2(x1)
x3 = self.stage3(x2)
x4 = self.stage4(x3)
z1 = voxel_to_point(x4, z0)
z1.F = z1.F + self.point_transforms[0](z0.F)
y1 = point_to_voxel(x4, z1)
pool = self.avg_pool(y1)
out = self.classifier(pool)
return out
import os
import numpy as np
import torch
from torchsparse import SparseTensor
from torchsparse.utils import sparse_collate_fn, sparse_quantize
from plyfile import PlyData, PlyElement
def init_image_coor(height, width, u0=None, v0=None):
u0 = width / 2.0 if u0 is None else u0
v0 = height / 2.0 if v0 is None else v0
x_row = np.arange(0, width)
x = np.tile(x_row, (height, 1))
x = x.astype(np.float32)
u_u0 = x - u0
y_col = np.arange(0, height)
y = np.tile(y_col, (width, 1)).T
y = y.astype(np.float32)
v_v0 = y - v0
return u_u0, v_v0
def depth_to_pcd(depth, u_u0, v_v0, f, invalid_value=0):
mask_invalid = depth <= invalid_value
depth[mask_invalid] = 0.0
x = u_u0 / f * depth
y = v_v0 / f * depth
z = depth
pcd = np.stack([x, y, z], axis=2)
return pcd, ~mask_invalid
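# Example (hypothetical values): back-project a toy depth map with an
# assumed focal length of 500 pixels:
#
#     depth = np.ones((4, 6), dtype=np.float32)
#     u_u0, v_v0 = init_image_coor(4, 6)
#     pcd, valid = depth_to_pcd(depth, u_u0, v_v0, f=500.0)
#     pcd.shape  # -> (4, 6, 3)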
def pcd_to_sparsetensor(pcd, mask_valid, voxel_size=0.01, num_points=100000):
pcd_valid = pcd[mask_valid]
block_ = pcd_valid
block = np.zeros_like(block_)
block[:, :3] = block_[:, :3]
pc_ = np.round(block_[:, :3] / voxel_size)
pc_ -= pc_.min(0, keepdims=1)
feat_ = block
# transfer point cloud to voxels
inds = sparse_quantize(pc_,
feat_,
return_index=True,
return_invs=False)
if len(inds) > num_points:
inds = np.random.choice(inds, num_points, replace=False)
pc = pc_[inds]
feat = feat_[inds]
lidar = SparseTensor(feat, pc)
feed_dict = [{'lidar': lidar}]
inputs = sparse_collate_fn(feed_dict)
return inputs
def pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f= 500.0, voxel_size=0.01, mask_side=None, num_points=100000):
if mask_side is not None:
mask_valid = mask_valid & mask_side
pcd_valid = pcd[mask_valid]
u_u0_valid = u_u0[mask_valid][:, np.newaxis] / f
v_v0_valid = v_v0[mask_valid][:, np.newaxis] / f
block_ = np.concatenate([pcd_valid, u_u0_valid, v_v0_valid], axis=1)
block = np.zeros_like(block_)
block[:, :] = block_[:, :]
pc_ = np.round(block_[:, :3] / voxel_size)
pc_ -= pc_.min(0, keepdims=1)
feat_ = block
# transfer point cloud to voxels
inds = sparse_quantize(pc_,
feat_,
return_index=True,
return_invs=False)
if len(inds) > num_points:
inds = np.random.choice(inds, num_points, replace=False)
pc = pc_[inds]
feat = feat_[inds]
lidar = SparseTensor(feat, pc)
feed_dict = [{'lidar': lidar}]
inputs = sparse_collate_fn(feed_dict)
return inputs
def refine_focal_one_step(depth, focal, model, u0, v0):
# reconstruct PCD from depth
u_u0, v_v0 = init_image_coor(depth.shape[0], depth.shape[1], u0=u0, v0=v0)
pcd, mask_valid = depth_to_pcd(depth, u_u0, v_v0, f=focal, invalid_value=0)
# input for the voxelnet
feed_dict = pcd_uv_to_sparsetensor(pcd, u_u0, v_v0, mask_valid, f=focal, voxel_size=0.005, mask_side=None)
inputs = feed_dict['lidar'].cuda()
outputs = model(inputs)
return outputs
def refine_shift_one_step(depth_wshift, model, focal, u0, v0):
# reconstruct PCD from depth
u_u0, v_v0 = init_image_coor(depth_wshift.shape[0], depth_wshift.shape[1], u0=u0, v0=v0)
pcd_wshift, mask_valid = depth_to_pcd(depth_wshift, u_u0, v_v0, f=focal, invalid_value=0)
# input for the voxelnet
feed_dict = pcd_to_sparsetensor(pcd_wshift, mask_valid, voxel_size=0.01)
inputs = feed_dict['lidar'].cuda()
outputs = model(inputs)
return outputs
def refine_focal(depth, focal, model, u0, v0):
last_scale = 1
focal_tmp = np.copy(focal)
for i in range(1):
scale = refine_focal_one_step(depth, focal_tmp, model, u0, v0)
focal_tmp = focal_tmp / scale.item()
last_scale = last_scale * scale
return torch.tensor([[last_scale]])
def refine_shift(depth_wshift, model, focal, u0, v0):
depth_wshift_tmp = np.copy(depth_wshift)
last_shift = 0
for i in range(1):
shift = refine_shift_one_step(depth_wshift_tmp, model, focal, u0, v0)
shift = shift if shift.item() < 0.7 else torch.tensor([[0.7]])
depth_wshift_tmp -= shift.item()
last_shift += shift.item()
return torch.tensor([[last_shift]])
def reconstruct_3D(depth, f):
"""
Reconstruct depth to 3D pointcloud with the provided focal length.
Return:
pcd: N X 3 array, point cloud
"""
cu = depth.shape[1] / 2
cv = depth.shape[0] / 2
width = depth.shape[1]
height = depth.shape[0]
row = np.arange(0, width, 1)
u = np.array([row for i in np.arange(height)])
col = np.arange(0, height, 1)
v = np.array([col for i in np.arange(width)])
v = v.transpose(1, 0)
if f > 1e5:
        print('Infinite focal length!')
x = u - cu
y = v - cv
z = depth / depth.max() * x.max()
else:
x = (u - cu) * depth / f
y = (v - cv) * depth / f
z = depth
    # Note: the np.float / np.int aliases were removed in NumPy 1.24;
    # use explicit dtypes instead.
    x = np.reshape(x, (width * height, 1)).astype(np.float64)
    y = np.reshape(y, (width * height, 1)).astype(np.float64)
    z = np.reshape(z, (width * height, 1)).astype(np.float64)
    pcd = np.concatenate((x, y, z), axis=1)
    pcd = pcd.astype(np.int32)
return pcd
def save_point_cloud(pcd, rgb, filename, binary=True):
"""Save an RGB point cloud as a PLY file.
    :params
@pcd: Nx3 matrix, the XYZ coordinates
@rgb: NX3 matrix, the rgb colors for each 3D point
"""
    # Check colors only when provided; asserting on rgb first would raise
    # before the rgb=None branch could run.
    if rgb is None:
        gray_concat = np.tile(np.array([128], dtype=np.uint8), (pcd.shape[0], 3))
        points_3d = np.hstack((pcd, gray_concat))
    else:
        assert pcd.shape[0] == rgb.shape[0]
        points_3d = np.hstack((pcd, rgb))
python_types = (float, float, float, int, int, int)
npy_types = [('x', 'f4'), ('y', 'f4'), ('z', 'f4'), ('red', 'u1'), ('green', 'u1'),
('blue', 'u1')]
if binary is True:
# Format into NumPy structured array
vertices = []
for row_idx in range(points_3d.shape[0]):
cur_point = points_3d[row_idx]
vertices.append(tuple(dtype(point) for dtype, point in zip(python_types, cur_point)))
vertices_array = np.array(vertices, dtype=npy_types)
el = PlyElement.describe(vertices_array, 'vertex')
# Write
PlyData([el]).write(filename)
else:
x = np.squeeze(points_3d[:, 0])
y = np.squeeze(points_3d[:, 1])
z = np.squeeze(points_3d[:, 2])
r = np.squeeze(points_3d[:, 3])
g = np.squeeze(points_3d[:, 4])
b = np.squeeze(points_3d[:, 5])
ply_head = 'ply\n' \
'format ascii 1.0\n' \
'element vertex %d\n' \
'property float x\n' \
'property float y\n' \
'property float z\n' \
'property uchar red\n' \
'property uchar green\n' \
'property uchar blue\n' \
'end_header' % r.shape[0]
# ---- Save ply data to disk
np.savetxt(filename, np.column_stack((x, y, z, r, g, b)), fmt="%d %d %d %d %d %d", header=ply_head, comments='')
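# Example (hypothetical): write a two-point cloud with red/green colors:
#
#     pts = np.array([[0.0, 0.0, 1.0], [1.0, 0.0, 2.0]])
#     cols = np.array([[255, 0, 0], [0, 255, 0]], dtype=np.uint8)
#     save_point_cloud(pts, cols, 'toy.ply')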
def reconstruct_depth(depth, rgb, dir, pcd_name, focal):
"""
    para depth: depth map, [h, w]
para rgb: rgb image, [h, w, 3], in rgb format
"""
rgb = np.squeeze(rgb)
depth = np.squeeze(depth)
mask = depth < 1e-8
depth[mask] = 0
depth = depth / depth.max() * 10000
pcd = reconstruct_3D(depth, f=focal)
rgb_n = np.reshape(rgb, (-1, 3))
save_point_cloud(pcd, rgb_n, os.path.join(dir, pcd_name + '.ply'))
def recover_metric_depth(pred, gt):
if type(pred).__module__ == torch.__name__:
pred = pred.cpu().numpy()
if type(gt).__module__ == torch.__name__:
gt = gt.cpu().numpy()
gt = gt.squeeze()
pred = pred.squeeze()
mask = (gt > 1e-8) & (pred > 1e-8)
gt_mask = gt[mask]
pred_mask = pred[mask]
a, b = np.polyfit(pred_mask, gt_mask, deg=1)
pred_metric = a * pred + b
return pred_metric
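# Example (hypothetical): the least-squares fit undoes an affine
# (scale and shift) transformation of the predicted depth:
#
#     gt = np.linspace(1.0, 5.0, 100)
#     pred = 0.5 * gt + 0.2
#     np.allclose(recover_metric_depth(pred, gt), gt)  # -> True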
import torch
import torch.nn as nn
from lib import network_auxi as network
from lib.net_tools import get_func
class RelDepthModel(nn.Module):
def __init__(self, backbone='resnet50'):
super(RelDepthModel, self).__init__()
if backbone == 'resnet50':
encoder = 'resnet50_stride32'
elif backbone == 'resnext101':
encoder = 'resnext101_stride32x8d'
self.depth_model = DepthModel(encoder)
def inference(self, rgb):
with torch.no_grad():
input = rgb.cuda()
depth = self.depth_model(input)
pred_depth_out = depth - depth.min() + 0.01
return pred_depth_out
class DepthModel(nn.Module):
def __init__(self, encoder):
super(DepthModel, self).__init__()
backbone = network.__name__.split('.')[-1] + '.' + encoder
self.encoder_modules = get_func(backbone)()
self.decoder_modules = network.Decoder()
def forward(self, x):
lateral_out = self.encoder_modules(x)
out_logit = self.decoder_modules(lateral_out)
        return out_logit
#!/usr/bin/env python
# coding: utf-8
import torch.nn as nn
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
__all__ = ['resnext101_32x8d']
model_urls = {
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
#self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
features = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
#x = self.avgpool(x)
#x = torch.flatten(x, 1)
#x = self.fc(x)
return features
def forward(self, x):
return self._forward_impl(x)
def resnext101_32x8d(pretrained=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
if __name__ == '__main__':
import torch
model = resnext101_32x8d(True).cuda()
rgb = torch.rand((2, 3, 256, 256)).cuda()
out = model(rgb)
print(len(out))
import torch.nn as nn
import torch.nn as NN
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = NN.BatchNorm2d(64) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
#self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
features = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
return features
def resnet18(pretrained=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
return model
def resnet34(pretrained=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
return model
def resnet50(pretrained=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
        pretrained (bool): kept for interface compatibility; no weights are loaded here
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
return model
def resnet101(pretrained=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
        pretrained (bool): kept for interface compatibility; no weights are loaded here
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
return model
def resnet152(pretrained=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
        pretrained (bool): kept for interface compatibility; no weights are loaded here
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
return model
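# Minimal sanity check for the backbone above (a sketch: it assumes the top of
# this file imports torch and defines conv3x3 plus the NN batch-norm alias):
if __name__ == '__main__':
    net = resnet50(pretrained=False)
    feats = net(torch.rand(1, 3, 224, 224))
    # four stages at strides 4/8/16/32 with 256/512/1024/2048 channels
    for f in feats:
        print(f.size())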
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torch.nn as nn
import torch.nn.init as init
from lib import Resnet, Resnext_torch
def resnet50_stride32():
return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2])
def resnext101_stride32x8d():
return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2])
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.inchannels = [256, 512, 1024, 2048]
self.midchannels = [256, 256, 256, 512]
self.upfactors = [2,2,2,2]
self.outchannels = 1
self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3])
self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True)
self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True)
self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels = self.midchannels[2], upfactor=self.upfactors[2])
self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels = self.midchannels[1], upfactor=self.upfactors[1])
self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels = self.midchannels[0], upfactor=self.upfactors[0])
self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2)
self._init_params()
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): #NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, features):
x_32x = self.conv(features[3]) # 1/32
x_32 = self.conv1(x_32x)
x_16 = self.upsample(x_32) # 1/16
x_8 = self.ffm2(features[2], x_16) # 1/8
x_4 = self.ffm1(features[1], x_8) # 1/4
x_2 = self.ffm0(features[0], x_4) # 1/2
#-----------------------------------------
x = self.outconv(x_2) # original size
return x
class DepthNet(nn.Module):
__factory = {
18: Resnet.resnet18,
34: Resnet.resnet34,
50: Resnet.resnet50,
101: Resnet.resnet101,
152: Resnet.resnet152
}
def __init__(self,
backbone='resnet',
depth=50,
upfactors=[2, 2, 2, 2]):
super(DepthNet, self).__init__()
self.backbone = backbone
self.depth = depth
self.pretrained = False
self.inchannels = [256, 512, 1024, 2048]
self.midchannels = [256, 256, 256, 512]
self.upfactors = upfactors
self.outchannels = 1
# Build model
if self.backbone == 'resnet':
if self.depth not in DepthNet.__factory:
raise KeyError("Unsupported depth:", self.depth)
self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained)
elif self.backbone == 'resnext101_32x8d':
self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained)
else:
self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained)
def forward(self, x):
x = self.encoder(x) # 1/32, 1/16, 1/8, 1/4
return x
class FTB(nn.Module):
def __init__(self, inchannels, midchannels=512):
super(FTB, self).__init__()
self.in1 = inchannels
self.mid = midchannels
self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1,
bias=True)
# NN.BatchNorm2d
self.conv_branch = nn.Sequential(nn.ReLU(inplace=True), \
nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
padding=1, stride=1, bias=True), \
nn.BatchNorm2d(num_features=self.mid), \
nn.ReLU(inplace=True), \
nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
padding=1, stride=1, bias=True))
self.relu = nn.ReLU(inplace=True)
self.init_params()
def forward(self, x):
x = self.conv1(x)
x = x + self.conv_branch(x)
x = self.relu(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class ATA(nn.Module):
def __init__(self, inchannels, reduction=8):
super(ATA, self).__init__()
self.inchannels = inchannels
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction),
nn.ReLU(inplace=True),
nn.Linear(self.inchannels // reduction, self.inchannels),
nn.Sigmoid())
self.init_params()
def forward(self, low_x, high_x):
n, c, _, _ = low_x.size()
x = torch.cat([low_x, high_x], 1)
x = self.avg_pool(x)
x = x.view(n, -1)
x = self.fc(x).view(n, c, 1, 1)
x = low_x * x + high_x
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
# init.normal(m.weight, std=0.01)
init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
# init.normal_(m.weight, std=0.01)
init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class FFM(nn.Module):
def __init__(self, inchannels, midchannels, outchannels, upfactor=2):
super(FFM, self).__init__()
self.inchannels = inchannels
self.midchannels = midchannels
self.outchannels = outchannels
self.upfactor = upfactor
self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels)
# self.ata = ATA(inchannels = self.midchannels)
self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels)
self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True)
self.init_params()
def forward(self, low_x, high_x):
x = self.ftb1(low_x)
x = x + high_x
x = self.ftb2(x)
x = self.upsample(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class AO(nn.Module):
# Adaptive output module
def __init__(self, inchannels, outchannels, upfactor=2):
super(AO, self).__init__()
self.inchannels = inchannels
self.outchannels = outchannels
self.upfactor = upfactor
self.adapt_conv = nn.Sequential(
nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1,
stride=1, bias=True), \
nn.BatchNorm2d(num_features=self.inchannels // 2), \
nn.ReLU(inplace=True), \
nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1,
stride=1, bias=True), \
nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True))
self.init_params()
def forward(self, x):
x = self.adapt_conv(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
# ==============================================================================================================
class ResidualConv(nn.Module):
def __init__(self, inchannels):
super(ResidualConv, self).__init__()
# NN.BatchNorm2d
self.conv = nn.Sequential(
# nn.BatchNorm2d(num_features=inchannels),
nn.ReLU(inplace=False),
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True),
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True)
            nn.Conv2d(in_channels=inchannels, out_channels=inchannels // 2, kernel_size=3, padding=1, stride=1,
                      bias=False),  # // 2: channel counts must be integers
            nn.BatchNorm2d(num_features=inchannels // 2),
            nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=inchannels // 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1,
                      bias=False)
)
self.init_params()
def forward(self, x):
x = self.conv(x) + x
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class FeatureFusion(nn.Module):
def __init__(self, inchannels, outchannels):
super(FeatureFusion, self).__init__()
self.conv = ResidualConv(inchannels=inchannels)
# NN.BatchNorm2d
self.up = nn.Sequential(ResidualConv(inchannels=inchannels),
nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3,
stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(num_features=outchannels),
nn.ReLU(inplace=True))
def forward(self, lowfeat, highfeat):
return self.up(highfeat + self.conv(lowfeat))
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class SenceUnderstand(nn.Module):
def __init__(self, channels):
super(SenceUnderstand, self).__init__()
self.channels = channels
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.ReLU(inplace=True))
self.pool = nn.AdaptiveAvgPool2d(8)
self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0),
nn.ReLU(inplace=True))
self.initial_params()
def forward(self, x):
n, c, h, w = x.size()
x = self.conv1(x)
x = self.pool(x)
x = x.view(n, -1)
x = self.fc(x)
x = x.view(n, self.channels, 1, 1)
x = self.conv2(x)
x = x.repeat(1, 1, h, w)
return x
def initial_params(self, dev=0.01):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0, dev)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, nn.ConvTranspose2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0, dev)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, dev)
if __name__ == '__main__':
    net = DepthNet(depth=50)  # DepthNet takes no `pretrained` argument; it is set internally
    print(net)
    inputs = torch.ones(4, 3, 128, 128)
    out = net(inputs)  # list of four feature maps at 1/4, 1/8, 1/16, 1/32 resolution
    for feat in out:
        print(feat.size())
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torchsparse.nn.functional as spf
from torchsparse import SparseTensor  # explicit imports; previously pulled in via the wildcard imports below
from torchsparse.point_tensor import PointTensor
from torchsparse.utils.kernel_region import *
from torchsparse.utils.helpers import *
__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point']
# z: PointTensor
# return: SparseTensor
def initial_voxelize(z, init_res, after_res):
new_float_coord = torch.cat(
[(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1)
pc_hash = spf.sphash(torch.floor(new_float_coord).int())
sparse_hash = torch.unique(pc_hash)
idx_query = spf.sphashquery(pc_hash, sparse_hash)
counts = spf.spcount(idx_query.int(), len(sparse_hash))
inserted_coords = spf.spvoxelize(torch.floor(new_float_coord), idx_query,
counts)
inserted_coords = torch.round(inserted_coords).int()
inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, inserted_coords, 1)
new_tensor.check()
z.additional_features['idx_query'][1] = idx_query
z.additional_features['counts'][1] = counts
z.C = new_float_coord
return new_tensor
# x: SparseTensor, z: PointTensor
# return: SparseTensor
def point_to_voxel(x, z):
if z.additional_features is None or z.additional_features.get('idx_query') is None\
or z.additional_features['idx_query'].get(x.s) is None:
#pc_hash = hash_gpu(torch.floor(z.C).int())
pc_hash = spf.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s).int() * x.s,
z.C[:, -1].int().view(-1, 1)
], 1))
sparse_hash = spf.sphash(x.C)
idx_query = spf.sphashquery(pc_hash, sparse_hash)
counts = spf.spcount(idx_query.int(), x.C.shape[0])
z.additional_features['idx_query'][x.s] = idx_query
z.additional_features['counts'][x.s] = counts
else:
idx_query = z.additional_features['idx_query'][x.s]
counts = z.additional_features['counts'][x.s]
inserted_feat = spf.spvoxelize(z.F, idx_query, counts)
new_tensor = SparseTensor(inserted_feat, x.C, x.s)
new_tensor.coord_maps = x.coord_maps
new_tensor.kernel_maps = x.kernel_maps
return new_tensor
# x: SparseTensor, z: PointTensor
# return: PointTensor
def voxel_to_point(x, z, nearest=False):
if z.idx_query is None or z.weights is None or z.idx_query.get(
x.s) is None or z.weights.get(x.s) is None:
kr = KernelRegion(2, x.s, 1)
off = kr.get_kernel_offset().to(z.F.device)
#old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off)
old_hash = spf.sphash(
torch.cat([
torch.floor(z.C[:, :3] / x.s).int() * x.s,
z.C[:, -1].int().view(-1, 1)
], 1), off)
pc_hash = spf.sphash(x.C.to(z.F.device))
idx_query = spf.sphashquery(old_hash, pc_hash)
weights = spf.calc_ti_weights(z.C, idx_query,
scale=x.s).transpose(0, 1).contiguous()
idx_query = idx_query.transpose(0, 1).contiguous()
if nearest:
weights[:, 1:] = 0.
idx_query[:, 1:] = -1
new_feat = spf.spdevoxelize(x.F, idx_query, weights)
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
new_tensor.idx_query[x.s] = idx_query
new_tensor.weights[x.s] = weights
z.idx_query[x.s] = idx_query
z.weights[x.s] = weights
else:
new_feat = spf.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s))
new_tensor = PointTensor(new_feat,
z.C,
idx_query=z.idx_query,
weights=z.weights)
new_tensor.additional_features = z.additional_features
return new_tensor
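# Minimal round-trip sketch (assumptions: torchsparse is installed, a CUDA
# device is available, and per-point coordinates are (x, y, z, batch_index);
# this demo is not part of the original file):
if __name__ == '__main__':
    coords = torch.randint(0, 32, (1024, 4), device='cuda').float()
    coords[:, -1] = 0                                      # single batch
    feats = torch.rand(1024, 16, device='cuda')
    z = PointTensor(feats, coords)
    x0 = initial_voxelize(z, init_res=1.0, after_res=1.0)  # points -> voxels
    z0 = voxel_to_point(x0, z)                             # voxels -> points
    print(x0.F.shape, z0.F.shape)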
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import cv2
import os
import argparse
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from lib.multi_depth_model_woauxi import RelDepthModel
from lib.net_tools import load_ckpt
def parse_args():
parser = argparse.ArgumentParser(
description='Configs for LeReS')
parser.add_argument('--load_ckpt', default='./res50.pth', help='Checkpoint path to load')
    parser.add_argument('--backbone', default='resnext101', help='Backbone architecture')
args = parser.parse_args()
return args
def scale_torch(img):
"""
Scale the image and output it in torch.tensor.
:param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W]
:param scale: the scale factor. float
:return: img. [C, H, W]
"""
if len(img.shape) == 2:
img = img[np.newaxis, :, :]
if img.shape[2] == 3:
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406) , (0.229, 0.224, 0.225) )])
img = transform(img)
else:
img = img.astype(np.float32)
img = torch.from_numpy(img)
return img
if __name__ == '__main__':
args = parse_args()
# create depth model
depth_model = RelDepthModel(backbone=args.backbone)
depth_model.eval()
# load checkpoint
load_ckpt(args, depth_model, None, None)
depth_model.cuda()
image_dir = os.path.dirname(os.path.dirname(__file__)) + '/test_images/'
imgs_list = os.listdir(image_dir)
imgs_list.sort()
imgs_path = [os.path.join(image_dir, i) for i in imgs_list if i != 'outputs']
image_dir_out = image_dir + '/outputs'
os.makedirs(image_dir_out, exist_ok=True)
for i, v in enumerate(imgs_path):
print('processing (%04d)-th image... %s' % (i, v))
rgb = cv2.imread(v)
rgb_c = rgb[:, :, ::-1].copy()
gt_depth = None
A_resize = cv2.resize(rgb_c, (448, 448))
rgb_half = cv2.resize(rgb, (rgb.shape[1]//2, rgb.shape[0]//2), interpolation=cv2.INTER_LINEAR)
img_torch = scale_torch(A_resize)[None, :, :, :]
pred_depth = depth_model.inference(img_torch).cpu().numpy().squeeze()
pred_depth_ori = cv2.resize(pred_depth, (rgb.shape[1], rgb.shape[0]))
# if GT depth is available, uncomment the following part to recover the metric depth
#pred_depth_metric = recover_metric_depth(pred_depth_ori, gt_depth)
img_name = v.split('/')[-1]
cv2.imwrite(os.path.join(image_dir_out, img_name), rgb)
# save depth
plt.imsave(os.path.join(image_dir_out, img_name[:-4]+'-depth.png'), pred_depth_ori, cmap='rainbow')
cv2.imwrite(os.path.join(image_dir_out, img_name[:-4]+'-depth_raw.png'), (pred_depth_ori/pred_depth_ori.max() * 60000).astype(np.uint16))
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import os
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from skimage.io import imread
# Util function for loading point clouds
import numpy as np
from plyfile import PlyData, PlyElement
# Data structures and functions for rendering
from pytorch3d.structures import Pointclouds
from pytorch3d.vis.plotly_vis import AxisArgs, plot_batch_individually, plot_scene
from pytorch3d.renderer import (
look_at_view_transform,
FoVOrthographicCameras,
PerspectiveCameras,
PointsRasterizationSettings,
PointsRenderer,
PulsarPointsRenderer,
PointsRasterizer,
AlphaCompositor,
NormWeightedCompositor,
look_at_rotation,
FoVPerspectiveCameras,
)
import imageio
device = torch.device("cuda:0")
def load_pcd_color(path):
#pointcloud = np.load(path)
pointcloud = PlyData.read(path).elements[0].data
verts = np.array([[data[0], data[1], data[2]] for data in pointcloud])
rgb = np.array([[data[3], data[4], data[5]] for data in pointcloud]) / 256.0
verts[:, 1] = -verts[:, 1]
verts[:, 0] = -verts[:, 0]
verts /= verts.max()
verts = verts - verts.mean(axis=0)
verts = torch.Tensor(verts).to(device)
rgb = torch.Tensor(rgb).to(device)
point_cloud = Pointclouds(points=[verts], features=[rgb])
return point_cloud
def load_pcd_color2(path):
#pointcloud = np.load(path)
# Load point cloud
pointcloud = np.load(path)
verts = torch.Tensor(pointcloud['verts']).to(device)
rgb = torch.Tensor(pointcloud['rgb']).to(device)
point_cloud = Pointclouds(points=[verts], features=[rgb])
return point_cloud
def rotate_pcd(point_cloud, angle):
    # Sketch completing an unfinished stub: rotate about y by `angle` radians around the centroid.
    rotate_matrix = np.array([[np.cos(angle), 0, np.sin(angle)],
                              [0, 1, 0],
                              [-np.sin(angle), 0, np.cos(angle)]])
    rot = torch.Tensor(rotate_matrix).to(device)
    points = point_cloud.points_packed()  # [N, 3]
    center = points.mean(dim=0, keepdim=True)
    points_rot = (points - center) @ rot.T + center
    return Pointclouds(points=[points_rot], features=point_cloud.features_list())
def render_single(point_cloud, azim=0):
# Initialize a camera.
R, T = look_at_view_transform(0.5, 10, azim=azim)
#cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
cameras = FoVPerspectiveCameras(fov=50.0, device=device, R=R, T=T)
    # Define the settings for rasterization and shading. Here we set the output image
    # size to 512x512 and composite the 50 nearest points per pixel with radius 0.008.
    # Refer to raster_points.py for explanations of these parameters.
raster_settings = PointsRasterizationSettings(
image_size=512,
radius=0.008,
points_per_pixel=50
)
# Create a points renderer by compositing points using an alpha compositor (nearer points
# are weighted more heavily). See [1] for an explanation.
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PointsRenderer(
rasterizer=rasterizer,
compositor=AlphaCompositor()
)
images = renderer(point_cloud)
# plt.figure(figsize=(10, 10))
# plt.imshow(images[0, ..., :3].cpu().numpy())
# plt.grid("off")
# plt.axis("off")
# plt.show()
img_out = images[0, ...].cpu().numpy()
img_out = (img_out * 256).astype(np.uint8)
return img_out
def render_batches(point_cloud):
# Initialize a camera.
batch_size = 10
point_clouds = point_cloud.extend(batch_size)
azim = torch.linspace(-180, 180, batch_size)
R, T = look_at_view_transform(-1, 0, azim=azim)
#cameras = FoVOrthographicCameras(device=device, R=R, T=T, znear=0.01)
cameras = FoVPerspectiveCameras(fov=50.0, device=device, R=R, T=T)
    # Define the settings for rasterization and shading. Here we set the output image
    # size to 512x512 and composite the 50 nearest points per pixel with radius 0.008.
    # Refer to raster_points.py for explanations of these parameters.
raster_settings = PointsRasterizationSettings(
image_size=512,
radius=0.008,
points_per_pixel=50
)
# Create a points renderer by compositing points using an alpha compositor (nearer points
# are weighted more heavily). See [1] for an explanation.
rasterizer = PointsRasterizer(cameras=cameras, raster_settings=raster_settings)
renderer = PointsRenderer(
rasterizer=rasterizer,
compositor=AlphaCompositor()
)
images = renderer(point_clouds)
plt.figure(figsize=(10, 10))
plt.imshow(images[0, ..., :3].cpu().numpy())
plt.grid("off")
plt.axis("off")
plt.show()
def render_create_GIF(point_cloud, filename):
# We will save images periodically and compose them into a GIF.
filename_output = filename + ".gif"
writer = imageio.get_writer(filename_output, mode='I', duration=0.01)
for i in range(-180, 180, 2):
image = render_single(point_cloud, i)
writer.append_data(image)
writer.close()
if __name__ == '__main__':
# path = '/mnt/data/WeiYin/publications/cvpr2021/first_page_fig/first_page_fig_results/8-rgb-recovershift.ply'
path = '/home/gk-ai/code/mesh710.ply'
point_cloud = load_pcd_color(path)
name = path.split('/')[-1][:-4]
#point_cloud = load_pcd_color2('/home/yvan/DeepLearning/Depth/MultiDepth/A-othertools/Render_PCD/data/PittsburghBridge/pointcloud.npz')
#render_single(point_cloud)
    render_create_GIF(point_cloud, name)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import cv2
import os
import argparse
import numpy as np
import torch
import matplotlib.pyplot as plt
import torchvision.transforms as transforms
from lib.test_utils import refine_focal, refine_shift
from lib.multi_depth_model_woauxi import RelDepthModel
from lib.net_tools import load_ckpt
from lib.spvcnn_classsification import SPVCNN_CLASSIFICATION
from lib.test_utils import reconstruct_depth
def parse_args():
parser = argparse.ArgumentParser(
description='Configs for LeReS')
parser.add_argument('--load_ckpt', default='./res50.pth', help='Checkpoint path to load')
    parser.add_argument('--backbone', default='resnext101', help='Backbone architecture')
args = parser.parse_args()
return args
def scale_torch(img):
"""
Scale the image and output it in torch.tensor.
:param img: input rgb is in shape [H, W, C], input depth/disp is in shape [H, W]
:param scale: the scale factor. float
:return: img. [C, H, W]
"""
if len(img.shape) == 2:
img = img[np.newaxis, :, :]
if img.shape[2] == 3:
transform = transforms.Compose([transforms.ToTensor(),
transforms.Normalize((0.485, 0.456, 0.406) , (0.229, 0.224, 0.225) )])
img = transform(img)
else:
img = img.astype(np.float32)
img = torch.from_numpy(img)
return img
def make_shift_focallength_models():
shift_model = SPVCNN_CLASSIFICATION(input_channel=3,
num_classes=1,
cr=1.0,
pres=0.01,
vres=0.01
)
focal_model = SPVCNN_CLASSIFICATION(input_channel=5,
num_classes=1,
cr=1.0,
pres=0.01,
vres=0.01
)
shift_model.eval()
focal_model.eval()
return shift_model, focal_model
def reconstruct3D_from_depth(rgb, pred_depth, shift_model, focal_model):
cam_u0 = rgb.shape[1] / 2.0
cam_v0 = rgb.shape[0] / 2.0
pred_depth_norm = pred_depth - pred_depth.min() + 0.5
dmax = np.percentile(pred_depth_norm, 98)
pred_depth_norm = pred_depth_norm / dmax
    # proposed focal length; FOV is 60 degrees (60-80 degrees are acceptable)
proposed_scaled_focal = (rgb.shape[0] // 2 / np.tan((60/2.0)*np.pi/180))
# recover focal
focal_scale_1 = refine_focal(pred_depth_norm, proposed_scaled_focal, focal_model, u0=cam_u0, v0=cam_v0)
predicted_focal_1 = proposed_scaled_focal / focal_scale_1.item()
# recover shift
shift_1 = refine_shift(pred_depth_norm, shift_model, predicted_focal_1, cam_u0, cam_v0)
shift_1 = shift_1 if shift_1.item() < 0.6 else torch.tensor([0.6])
depth_scale_1 = pred_depth_norm - shift_1.item()
# recover focal
focal_scale_2 = refine_focal(depth_scale_1, predicted_focal_1, focal_model, u0=cam_u0, v0=cam_v0)
predicted_focal_2 = predicted_focal_1 / focal_scale_2.item()
return shift_1, predicted_focal_2, depth_scale_1
if __name__ == '__main__':
args = parse_args()
# create depth model
depth_model = RelDepthModel(backbone=args.backbone)
depth_model.eval()
# create shift and focal length model
shift_model, focal_model = make_shift_focallength_models()
# load checkpoint
load_ckpt(args, depth_model, shift_model, focal_model)
depth_model.cuda()
shift_model.cuda()
focal_model.cuda()
image_dir = os.path.dirname(os.path.dirname(__file__)) + '/test_images/'
imgs_list = os.listdir(image_dir)
imgs_list.sort()
imgs_path = [os.path.join(image_dir, i) for i in imgs_list if i != 'outputs']
image_dir_out = image_dir + '/outputs'
os.makedirs(image_dir_out, exist_ok=True)
for i, v in enumerate(imgs_path):
print('processing (%04d)-th image... %s' % (i, v))
rgb = cv2.imread(v)
rgb_c = rgb[:, :, ::-1].copy()
gt_depth = None
A_resize = cv2.resize(rgb_c, (448, 448))
rgb_half = cv2.resize(rgb, (rgb.shape[1]//2, rgb.shape[0]//2), interpolation=cv2.INTER_LINEAR)
img_torch = scale_torch(A_resize)[None, :, :, :]
pred_depth = depth_model.inference(img_torch).cpu().numpy().squeeze()
pred_depth_ori = cv2.resize(pred_depth, (rgb.shape[1], rgb.shape[0]))
# recover focal length, shift, and scale-invariant depth
shift, focal_length, depth_scaleinv = reconstruct3D_from_depth(rgb, pred_depth_ori,
shift_model, focal_model)
disp = 1 / depth_scaleinv
disp = (disp / disp.max() * 60000).astype(np.uint16)
# if GT depth is available, uncomment the following part to recover the metric depth
#pred_depth_metric = recover_metric_depth(pred_depth_ori, gt_depth)
img_name = v.split('/')[-1]
cv2.imwrite(os.path.join(image_dir_out, img_name), rgb)
# save depth
plt.imsave(os.path.join(image_dir_out, img_name[:-4]+'-depth.png'), pred_depth_ori, cmap='rainbow')
cv2.imwrite(os.path.join(image_dir_out, img_name[:-4]+'-depth_raw.png'), (pred_depth_ori/pred_depth_ori.max() * 60000).astype(np.uint16))
# save disp
cv2.imwrite(os.path.join(image_dir_out, img_name[:-4]+'.png'), disp)
# reconstruct point cloud from the depth
reconstruct_depth(depth_scaleinv, rgb[:, :, ::-1], image_dir_out, img_name[:-4]+'-pcd', focal=focal_length)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
export PYTHONPATH=../../Train:${PYTHONPATH}
python ../tools/train.py \
--dataroot datasets \
--backbone resnet50 \
--dataset_list demo \
--batchsize 2 \
--base_lr 0.001 \
--use_tfboard \
--thread 4 \
--loss_mode _ranking-edge_pairwise-normal-regress-edge_msgil-normal_meanstd-tanh_pairwise-normal-regress-plane_ranking-edge-auxi_meanstd-tanh-auxi_ \
--epoch 2 \
--lr_scheduler_multiepochs 1 \
--val_step 50 \
--snapshot_iters 50 \
--log_interval 5
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
export PYTHONPATH=../../Train:${PYTHONPATH}
export CUDA_VISIBLE_DEVICES=0,1,2,3
python ../tools/train.py \
--dataroot datasets \
--backbone resnet50 \
--dataset_list taskonomy DiverseDepth HRWSI Holopix50k \
--batchsize 16 \
--base_lr 0.001 \
--use_tfboard \
--thread 4 \
--loss_mode _ranking-edge_pairwise-normal-regress-edge_msgil-normal_meanstd-tanh_pairwise-normal-regress-plane_ranking-edge-auxi_meanstd-tanh-auxi_ \
--epoch 50 \
--lr_scheduler_multiepochs 10 25 40 \
--val_step 5000 \
--snapshot_iters 5000 \
--log_interval 10
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
export PYTHONPATH=../../Train:${PYTHONPATH}
export CUDA_VISIBLE_DEVICES=0
TIME=`date +%Y-%m-%d_%H-%M-%S`
LOG="./$TIME.txt"
python ../tools/test_multiauxiv2_nyu.py \
--dataroot ./datasets \
--batchsize 1 \
--load_ckpt path_to_ckpt.pth \
$1 2>&1 | tee $LOG
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torch.nn as nn
class MSGIL_NORM_Loss(nn.Module):
"""
Our proposed GT normalized Multi-scale Gradient Loss Fuction.
"""
def __init__(self, scale=4, valid_threshold=-1e-8, max_threshold=1e8):
super(MSGIL_NORM_Loss, self).__init__()
self.scales_num = scale
self.valid_threshold = valid_threshold
self.max_threshold = max_threshold
self.EPSILON = 1e-6
def one_scale_gradient_loss(self, pred_scale, gt, mask):
mask_float = mask.to(dtype=pred_scale.dtype, device=pred_scale.device)
d_diff = pred_scale - gt
v_mask = torch.mul(mask_float[:, :, :-2, :], mask_float[:, :, 2:, :])
v_gradient = torch.abs(d_diff[:, :, :-2, :] - d_diff[:, :, 2:, :])
v_gradient = torch.mul(v_gradient, v_mask)
h_gradient = torch.abs(d_diff[:, :, :, :-2] - d_diff[:, :, :, 2:])
h_mask = torch.mul(mask_float[:, :, :, :-2], mask_float[:, :, :, 2:])
h_gradient = torch.mul(h_gradient, h_mask)
valid_num = torch.sum(h_mask) + torch.sum(v_mask)
gradient_loss = torch.sum(h_gradient) + torch.sum(v_gradient)
gradient_loss = gradient_loss / (valid_num + 1e-8)
return gradient_loss
def transform(self, gt):
# Get mean and standard deviation
data_mean = []
data_std_dev = []
for i in range(gt.shape[0]):
gt_i = gt[i]
mask = gt_i > 0
depth_valid = gt_i[mask]
if depth_valid.shape[0] < 10:
                data_mean.append(torch.tensor(0.0).cuda())  # float, so torch.stack below keeps one dtype
                data_std_dev.append(torch.tensor(1.0).cuda())
continue
size = depth_valid.shape[0]
depth_valid_sort, _ = torch.sort(depth_valid, 0)
depth_valid_mask = depth_valid_sort[int(size*0.1): -int(size*0.1)]
data_mean.append(depth_valid_mask.mean())
data_std_dev.append(depth_valid_mask.std())
data_mean = torch.stack(data_mean, dim=0).cuda()
data_std_dev = torch.stack(data_std_dev, dim=0).cuda()
return data_mean, data_std_dev
def forward(self, pred, gt):
mask = gt > self.valid_threshold
grad_term = 0.0
gt_mean, gt_std = self.transform(gt)
gt_trans = (gt - gt_mean[:, None, None, None]) / (gt_std[:, None, None, None] + 1e-8)
for i in range(self.scales_num):
step = pow(2, i)
d_gt = gt_trans[:, :, ::step, ::step]
d_pred = pred[:, :, ::step, ::step]
d_mask = mask[:, :, ::step, ::step]
grad_term += self.one_scale_gradient_loss(d_pred, d_gt, d_mask)
return grad_term
if __name__ == '__main__':
msgi_loss = MSGIL_NORM_Loss()
pred_depth = torch.rand([2, 1, 385, 513]).cuda()
gt_depth = torch.rand([2, 1, 385, 513]).cuda()
loss = msgi_loss(pred_depth, gt_depth)
print(loss)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torch.nn as nn
class MEADSTD_TANH_NORM_Loss(nn.Module):
"""
loss = MAE((d-u)/s - d') + MAE(tanh(0.01*(d-u)/s) - tanh(0.01*d'))
"""
def __init__(self, valid_threshold=-1e-8, max_threshold=1e8):
super(MEADSTD_TANH_NORM_Loss, self).__init__()
self.valid_threshold = valid_threshold
self.max_threshold = max_threshold
#self.thres1 = 0.9
def transform(self, gt):
# Get mean and standard deviation
data_mean = []
data_std_dev = []
for i in range(gt.shape[0]):
gt_i = gt[i]
mask = gt_i > 0
depth_valid = gt_i[mask]
if depth_valid.shape[0] < 10:
                data_mean.append(torch.tensor(0.0).cuda())  # float, so torch.stack below keeps one dtype
                data_std_dev.append(torch.tensor(1.0).cuda())
continue
size = depth_valid.shape[0]
depth_valid_sort, _ = torch.sort(depth_valid, 0)
depth_valid_mask = depth_valid_sort[int(size*0.1): -int(size*0.1)]
data_mean.append(depth_valid_mask.mean())
data_std_dev.append(depth_valid_mask.std())
data_mean = torch.stack(data_mean, dim=0).cuda()
data_std_dev = torch.stack(data_std_dev, dim=0).cuda()
return data_mean, data_std_dev
def forward(self, pred, gt):
"""
Calculate loss.
"""
mask = (gt > self.valid_threshold) & (gt < self.max_threshold) # [b, c, h, w]
mask_sum = torch.sum(mask, dim=(1, 2, 3))
# mask invalid batches
mask_batch = mask_sum > 100
        if not mask_batch.any():
return torch.tensor(0.0, dtype=torch.float).cuda()
mask_maskbatch = mask[mask_batch]
pred_maskbatch = pred[mask_batch]
gt_maskbatch = gt[mask_batch]
gt_mean, gt_std = self.transform(gt_maskbatch)
gt_trans = (gt_maskbatch - gt_mean[:, None, None, None]) / (gt_std[:, None, None, None] + 1e-8)
B, C, H, W = gt_maskbatch.shape
loss = 0
loss_tanh = 0
for i in range(B):
mask_i = mask_maskbatch[i, ...]
pred_depth_i = pred_maskbatch[i, ...][mask_i]
gt_trans_i = gt_trans[i, ...][mask_i]
depth_diff = torch.abs(gt_trans_i - pred_depth_i)
loss += torch.mean(depth_diff)
tanh_norm_gt = torch.tanh(0.01*gt_trans_i)
tanh_norm_pred = torch.tanh(0.01*pred_depth_i)
loss_tanh += torch.mean(torch.abs(tanh_norm_gt - tanh_norm_pred))
loss_out = loss/B + loss_tanh/B
return loss_out.float()
if __name__ == '__main__':
ilnr_loss = MEADSTD_TANH_NORM_Loss()
pred_depth = torch.rand([3, 1, 385, 513]).cuda()
gt_depth = torch.rand([3, 1, 385, 513]).cuda()
loss = ilnr_loss(pred_depth, gt_depth)
print(loss)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
"""
Sampling strategies: RS (Random Sampling), EGS (Edge-Guided Sampling), and IGS (Instance-Guided Sampling)
"""
###########
# RANDOM SAMPLING
# input:
# inputs[i,:], targets[i, :], masks[i, :], self.mask_value, self.point_pairs
# return:
# inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
###########
def randomSampling(inputs, targets, masks, threshold, sample_num):
# find A-B point pairs from predictions
inputs_index = torch.masked_select(inputs, targets.gt(threshold))
num_effect_pixels = len(inputs_index)
shuffle_effect_pixels = torch.randperm(num_effect_pixels).cuda()
inputs_A = inputs_index[shuffle_effect_pixels[0:sample_num*2:2]]
inputs_B = inputs_index[shuffle_effect_pixels[1:sample_num*2:2]]
# find corresponding pairs from GT
target_index = torch.masked_select(targets, targets.gt(threshold))
targets_A = target_index[shuffle_effect_pixels[0:sample_num*2:2]]
targets_B = target_index[shuffle_effect_pixels[1:sample_num*2:2]]
# only compute the losses of point pairs with valid GT
consistent_masks_index = torch.masked_select(masks, targets.gt(threshold))
consistent_masks_A = consistent_masks_index[shuffle_effect_pixels[0:sample_num*2:2]]
consistent_masks_B = consistent_masks_index[shuffle_effect_pixels[1:sample_num*2:2]]
    # A and B must contain the same number of samples
if len(targets_A) > len(targets_B):
targets_A = targets_A[:-1]
inputs_A = inputs_A[:-1]
consistent_masks_A = consistent_masks_A[:-1]
return inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
###########
# EDGE-GUIDED SAMPLING
# input:
# inputs[i,:], targets[i, :], masks[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w
# return:
# inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B
###########
def ind2sub(idx, cols):
    r = idx // cols  # integer (floor) division; `/` would yield float indices in Python 3
    c = idx - r * cols
    return r, c
def sub2ind(r, c, cols):
idx = r * cols + c
return idx
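# Quick check for the index helpers above (hypothetical values): in a grid with
# 5 columns, flat index 7 maps to row 1, column 2, and back again:
#   ind2sub(torch.tensor(7), 5)                  -> (tensor(1), tensor(2))
#   sub2ind(torch.tensor(1), torch.tensor(2), 5) -> tensor(7)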
def edgeGuidedSampling(inputs, targets, edges_img, thetas_img, masks, h, w):
# find edges
edges_max = edges_img.max()
edges_mask = edges_img.ge(edges_max*0.1)
edges_loc = edges_mask.nonzero()
inputs_edge = torch.masked_select(inputs, edges_mask)
targets_edge = torch.masked_select(targets, edges_mask)
thetas_edge = torch.masked_select(thetas_img, edges_mask)
minlen = inputs_edge.size()[0]
# find anchor points (i.e, edge points)
sample_num = minlen
index_anchors = torch.randint(0, minlen, (sample_num,), dtype=torch.long).cuda()
anchors = torch.gather(inputs_edge, 0, index_anchors)
theta_anchors = torch.gather(thetas_edge, 0, index_anchors)
row_anchors, col_anchors = ind2sub(edges_loc[index_anchors].squeeze(1), w)
## compute the coordinates of 4-points, distances are from [2, 30]
distance_matrix = torch.randint(2, 31, (4,sample_num)).cuda()
pos_or_neg = torch.ones(4, sample_num).cuda()
pos_or_neg[:2,:] = -pos_or_neg[:2,:]
distance_matrix = distance_matrix.float() * pos_or_neg
col = col_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.double() * torch.cos(theta_anchors).unsqueeze(0)).long()
row = row_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.double() * torch.sin(theta_anchors).unsqueeze(0)).long()
    # clamp to valid pixel coordinates: 0 <= c <= w-1, 0 <= r <= h-1
col[col<0] = 0
col[col>w-1] = w-1
row[row<0] = 0
row[row>h-1] = h-1
# a-b, b-c, c-d
a = sub2ind(row[0,:], col[0,:], w)
b = sub2ind(row[1,:], col[1,:], w)
c = sub2ind(row[2,:], col[2,:], w)
d = sub2ind(row[3,:], col[3,:], w)
A = torch.cat((a,b,c), 0)
B = torch.cat((b,c,d), 0)
inputs_A = torch.gather(inputs, 0, A.long())
inputs_B = torch.gather(inputs, 0, B.long())
targets_A = torch.gather(targets, 0, A.long())
targets_B = torch.gather(targets, 0, B.long())
masks_A = torch.gather(masks, 0, A.long())
masks_B = torch.gather(masks, 0, B.long())
return inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num
######################################################
# EdgeguidedRankingLoss (with regularization term)
# Please comment regularization_loss if you don't want to use multi-scale gradient matching term
#####################################################
class EdgeguidedRankingLoss(nn.Module):
def __init__(self, point_pairs=10000, sigma=0.03, alpha=1.0, mask_value=-1e-8):
super(EdgeguidedRankingLoss, self).__init__()
self.point_pairs = point_pairs # number of point pairs
self.sigma = sigma # used for determining the ordinal relationship between a selected pair
self.alpha = alpha # used for balancing the effect of = and (<,>)
self.mask_value = mask_value
#self.regularization_loss = GradientLoss(scales=4)
def getEdge(self, images):
n,c,h,w = images.size()
a = torch.Tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]).cuda().view((1,1,3,3)).repeat(1, 1, 1, 1)
b = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).cuda().view((1,1,3,3)).repeat(1, 1, 1, 1)
if c == 3:
gradient_x = F.conv2d(images[:,0,:,:].unsqueeze(1), a)
gradient_y = F.conv2d(images[:,0,:,:].unsqueeze(1), b)
else:
gradient_x = F.conv2d(images, a)
gradient_y = F.conv2d(images, b)
edges = torch.sqrt(torch.pow(gradient_x,2)+ torch.pow(gradient_y,2))
edges = F.pad(edges, (1,1,1,1), "constant", 0)
thetas = torch.atan2(gradient_y, gradient_x)
thetas = F.pad(thetas, (1,1,1,1), "constant", 0)
return edges, thetas
def forward(self, inputs, targets, images, masks=None):
        if masks is None:
masks = targets > self.mask_value
# Comment this line if you don't want to use the multi-scale gradient matching term !!!
# regularization_loss = self.regularization_loss(inputs.squeeze(1), targets.squeeze(1), masks.squeeze(1))
# find edges from RGB
edges_img, thetas_img = self.getEdge(images)
#=============================
n,c,h,w = targets.size()
if n != 1:
inputs = inputs.view(n, -1).double()
targets = targets.view(n, -1).double()
masks = masks.view(n, -1).double()
edges_img = edges_img.view(n, -1).double()
thetas_img = thetas_img.view(n, -1).double()
else:
inputs = inputs.contiguous().view(1, -1).double()
targets = targets.contiguous().view(1, -1).double()
masks = masks.contiguous().view(1, -1).double()
edges_img = edges_img.contiguous().view(1, -1).double()
thetas_img = thetas_img.contiguous().view(1, -1).double()
# initialization
loss = torch.DoubleTensor([0.0]).cuda()
for i in range(n):
# Edge-Guided sampling
inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num = edgeGuidedSampling(inputs[i,:], targets[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w)
# Random Sampling
random_sample_num = sample_num
random_inputs_A, random_inputs_B, random_targets_A, random_targets_B, random_masks_A, random_masks_B = randomSampling(inputs[i,:], targets[i, :], masks[i, :], self.mask_value, random_sample_num)
# Combine EGS + RS
inputs_A = torch.cat((inputs_A, random_inputs_A), 0)
inputs_B = torch.cat((inputs_B, random_inputs_B), 0)
targets_A = torch.cat((targets_A, random_targets_A), 0)
targets_B = torch.cat((targets_B, random_targets_B), 0)
masks_A = torch.cat((masks_A, random_masks_A), 0)
masks_B = torch.cat((masks_B, random_masks_B), 0)
#GT ordinal relationship
target_ratio = torch.div(targets_A+1e-6, targets_B+1e-6)
mask_eq = target_ratio.lt(1.0 + self.sigma) * target_ratio.gt(1.0/(1.0+self.sigma))
labels = torch.zeros_like(target_ratio)
labels[target_ratio.ge(1.0 + self.sigma)] = 1
labels[target_ratio.le(1.0/(1.0+self.sigma))] = -1
# consider forward-backward consistency checking, i.e, only compute losses of point pairs with valid GT
consistency_mask = masks_A * masks_B
equal_loss = (inputs_A - inputs_B).pow(2) * mask_eq.double() * consistency_mask
unequal_loss = torch.log(1 + torch.exp((-inputs_A + inputs_B) * labels)) * (~mask_eq).double() * consistency_mask
# Please comment the regularization term if you don't want to use the multi-scale gradient matching loss !!!
loss = loss + self.alpha * equal_loss.mean() + 1.0 * unequal_loss.mean() #+ 0.2 * regularization_loss.double()
return loss[0].float()/n
if __name__ == '__main__':
    import cv2
    rank_loss = EdgeguidedRankingLoss()
    pred_depth = np.random.randn(2, 1, 480, 640)
    gt_depth = np.random.randn(2, 1, 480, 640)
    images = np.random.randn(2, 3, 480, 640)  # RGB input is required for edge-guided sampling
    # gt_depth = cv2.imread('/hardware/yifanliu/SUNRGBD/sunrgbd-meta-data/sunrgbd_test_depth/2.png', -1)
    # gt_depth = gt_depth[None, :, :, None]
    # pred_depth = gt_depth[:, :, ::-1, :]
    gt_depth = torch.tensor(np.asarray(gt_depth, np.float32)).cuda()
    pred_depth = torch.tensor(np.asarray(pred_depth, np.float32)).cuda()
    images = torch.tensor(np.asarray(images, np.float32)).cuda()
    loss = rank_loss(pred_depth, gt_depth, images)
    print(loss)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torch.nn as nn
import numpy as np
class PWNPlanesLoss(nn.Module):
"""
Virtual Normal Loss Function.
"""
def __init__(self, focal_x, focal_y, input_size,
delta_cos=0.867, delta_diff_x=0.007,
delta_diff_y=0.007, sample_groups=5000, xyz_mode = 'uvd'):
"""
Virtual normal planes loss, which constrain points to be on the same 3D plane.
:para focal_x: folcal length fx
:para focal_y: folcal length fy
:para input_size: input image size
:para delta_cos: a threshold for the angle among three point, three points should not be on the same plane
:para delta_diff_x: a threshold for the distance among three points along the x axis
:para delta_diff_y: a threshold for the distance among three points along the y axis
:para sample_groups: sample groups number, each group with 3 points can construct a plane
:para xyz_mode: using (u, v, d) or (x, y, z) to construct the virtual planes
"""
super(PWNPlanesLoss, self).__init__()
self.fx = torch.tensor([focal_x], dtype=torch.float32).cuda()
self.fy = torch.tensor([focal_y], dtype=torch.float32).cuda()
self.input_size = input_size
self.u0 = torch.tensor(input_size[1] // 2, dtype=torch.float32).cuda()
self.v0 = torch.tensor(input_size[0] // 2, dtype=torch.float32).cuda()
self.init_image_coor()
self.delta_cos = delta_cos
self.delta_diff_x = delta_diff_x
self.delta_diff_y = delta_diff_y
self.sample_groups = sample_groups
self.xyz_mode = xyz_mode
def init_image_coor(self):
x_row = np.arange(0, self.input_size[1])
x = np.tile(x_row, (self.input_size[0], 1))
x = x[np.newaxis, :, :]
x = x.astype(np.float32)
x = torch.from_numpy(x.copy()).cuda()
self.u_u0 = x - self.u0
y_col = np.arange(0, self.input_size[0]) # y_col = np.arange(0, height)
y = np.tile(y_col, (self.input_size[1], 1)).T
y = y[np.newaxis, :, :]
y = y.astype(np.float32)
y = torch.from_numpy(y.copy()).cuda()
self.v_v0 = y - self.v0
def transfer_uvz(self, depth):
max_uv = self.u_u0.max()
u = self.u_u0.repeat((depth.shape[0], 1, 1, 1)) / max_uv
v = self.v_v0.repeat((depth.shape[0], 1, 1, 1)) / max_uv
z = depth
pw = torch.cat([u, v, z], 1).permute(0, 2, 3, 1) # [b, h, w, c]
return pw
def transfer_xyz(self, depth):
x = self.u_u0 * torch.abs(depth) / self.fx
y = self.v_v0 * torch.abs(depth) / self.fy
z = depth
pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1) # [b, h, w, c]
return pw
def select_index(self, mask_kp):
x, _, h, w = mask_kp.shape
select_size = int(3 * self.sample_groups)
p1_x = []
p1_y = []
p2_x = []
p2_y = []
p3_x = []
p3_y = []
valid_batch = torch.ones((x, 1), dtype=torch.bool)
for i in range(x):
mask_kp_i = mask_kp[i, 0, :, :]
valid_points = torch.nonzero(mask_kp_i)
if valid_points.shape[0] < select_size * 0.6:
valid_points = torch.nonzero(~mask_kp_i.to(torch.uint8))
valid_batch[i, :] = False
elif valid_points.shape[0] < select_size:
repeat_idx = torch.randperm(valid_points.shape[0])[:select_size - valid_points.shape[0]]
valid_repeat = valid_points[repeat_idx]
valid_points = torch.cat((valid_points, valid_repeat), 0)
else:
valid_points = valid_points
"""
if valid_points.shape[0] <= select_size:
valid_points = torch.nonzero(~mask_kp_i.to(torch.uint8))
valid_batch[i, :] = False
"""
select_indx = torch.randperm(valid_points.size(0))
p1 = valid_points[select_indx[0:select_size:3]]
p2 = valid_points[select_indx[1:select_size:3]]
p3 = valid_points[select_indx[2:select_size:3]]
p1_x.append(p1[:, 1])
p1_y.append(p1[:, 0])
p2_x.append(p2[:, 1])
p2_y.append(p2[:, 0])
p3_x.append(p3[:, 1])
p3_y.append(p3[:, 0])
p123 = {'p1_x': torch.stack(p1_x), 'p1_y': torch.stack(p1_y),
'p2_x': torch.stack(p2_x), 'p2_y': torch.stack(p2_y),
'p3_x': torch.stack(p3_x), 'p3_y': torch.stack(p3_y),
'valid_batch': valid_batch}
return p123
def form_pw_groups(self, p123, pw):
"""
Form 3D points groups, with 3 points in each grouup.
:param p123: points index
:param pw: 3D points, # [1, h, w, c]
:return:
"""
p1_x = p123['p1_x']
p1_y = p123['p1_y']
p2_x = p123['p2_x']
p2_y = p123['p2_y']
p3_x = p123['p3_x']
p3_y = p123['p3_y']
batch_list = np.arange(0, p1_x.shape[0])[:, np.newaxis]
pw = pw.repeat((p1_x.shape[0], 1, 1, 1))
pw1 = pw[batch_list, p1_y, p1_x, :]
pw2 = pw[batch_list, p2_y, p2_x, :]
pw3 = pw[batch_list, p3_y, p3_x, :]
# [B, N, 3(x,y,z), 3(p1,p2,p3)]
pw_groups = torch.cat([pw1[:, :, :, np.newaxis], pw2[:, :, :, np.newaxis], pw3[:, :, :, np.newaxis]], 3)
return pw_groups
def filter_mask(self, pw_pred):
"""
:param pw_pred: constructed 3d vector (x, y, disp), [B, N, 3(x,y,z), 3(p1,p2,p3)]
"""
xy12 = pw_pred[:, :, 0:2, 1] - pw_pred[:, :, 0:2, 0]
xy13 = pw_pred[:, :, 0:2, 2] - pw_pred[:, :, 0:2, 0]
xy23 = pw_pred[:, :, 0:2, 2] - pw_pred[:, :, 0:2, 1]
        # ignore nearly collinear point triplets
xy_diff = torch.cat([xy12[:, :, :, np.newaxis], xy13[:, :, :, np.newaxis], xy23[:, :, :, np.newaxis]],
3) # [b, n, 2(xy), 3]
m_batchsize, groups, coords, index = xy_diff.shape
proj_query = xy_diff.view(m_batchsize * groups, -1, index).permute(0, 2, 1) # [bn, 3(p123), 2(xy)]
proj_key = xy_diff.view(m_batchsize * groups, -1, index) # [bn, 2(xy), 3(p123)]
q_norm = proj_query.norm(2, dim=2) # [bn, 3(p123)]
nm = torch.bmm(q_norm.view(m_batchsize * groups, index, 1), q_norm.view(m_batchsize * groups, 1, index)) # []
energy = torch.bmm(proj_query, proj_key) # transpose check [bn, 3(p123), 3(p123)]
norm_energy = energy / (nm + 1e-8)
norm_energy = norm_energy.view(m_batchsize * groups, -1) # [bn, 9(p123)]
        mask_cos = torch.sum((norm_energy > self.delta_cos) + (norm_energy < -self.delta_cos), 1) > 3  # ignore
        mask_cos = mask_cos.view(m_batchsize, groups)  # [b, n], ignore nearly collinear triplets
#ignore near
mask_x = torch.sum(torch.abs(xy_diff[:, :, 0, :]) < self.delta_diff_x, 2) > 0
mask_y = torch.sum(torch.abs(xy_diff[:, :, 1, :]) < self.delta_diff_y, 2) > 0
mask_near = mask_x & mask_y
mask_valid_pts = ~(mask_cos | mask_near)
return mask_valid_pts
def select_points_groups(self, pred_depth, mask_kp):
p123 = self.select_index(mask_kp) # p1_x: [x, N]
uvz_pred = self.transfer_uvz(pred_depth) #[1, h, w, 3(xyz)]
uvz_groups_pred = self.form_pw_groups(p123, uvz_pred) # [x, N, 3(x,y,z), 3(p1,p2,p3)]
# mask:[b, n]
mask_valid_pts = (self.filter_mask(uvz_groups_pred)).to(torch.bool) # [x, n]
mask_valid_batch = p123['valid_batch'].repeat(1, mask_valid_pts.shape[1]) # [x, n]
mask_valid = mask_valid_pts & mask_valid_batch.cuda() # [x, n]
if self.xyz_mode == 'uvd':
pw_groups_pred = uvz_groups_pred
else:
xyz_pred = self.transfer_xyz(pred_depth) # [1, h, w, 3(xyz)]
pw_groups_pred = self.form_pw_groups(p123, xyz_pred) # [x, N, 3(x,y,z), 3(p1,p2,p3)]
return pw_groups_pred, mask_valid
def constrain_a_plane_loss(self, pw_groups_pre_i, mask_valid_i):
"""
pw_groups_pre: selected points groups for the i-th plane,
"""
if torch.sum(mask_valid_i) < 2:
return [0.0, 0]
pw_groups_pred_i = pw_groups_pre_i[mask_valid_i] # [n, 3, 3]
p12 = pw_groups_pred_i[:, :, 1] - pw_groups_pred_i[:, :, 0]
p13 = pw_groups_pred_i[:, :, 2] - pw_groups_pred_i[:, :, 0]
virtual_normal = torch.cross(p12, p13, dim=1) # [n, 3]
norm = torch.norm(virtual_normal, 2, dim=1, keepdim=True)
virtual_normal = virtual_normal / (norm + 1e-8)
# re-orient normals consistently
orient_mask = torch.sum(torch.squeeze(virtual_normal) * torch.squeeze(pw_groups_pred_i[:, :, 0]), dim=1) > 0
virtual_normal[orient_mask] *= -1
#direct = virtual_normal[:, 2] / torch.abs(virtual_normal[:, 2])
#virtual_normal = virtual_normal / direct[:, None] # [n, 3]
aver_normal = torch.sum(virtual_normal, dim=0)
aver_norm = torch.norm(aver_normal, 2, dim=0, keepdim=True)
aver_normal = aver_normal / (aver_norm + 1e-5) # [3]
cos_diff = 1.0 - torch.sum(virtual_normal * aver_normal, dim=1)
loss = torch.sum(cos_diff, dim=0)
valid_num = cos_diff.numel()
return loss, valid_num
def forward(self, gt_depth, pred_depth, mask, focal_length=None):
"""
Virtual normal loss.
:param pred_depth: predicted depth map, [B,C,H,W]
:param mask: mask for planes, each plane is noted with a value, [B, C, H, W]
:param focal_length: focal length
"""
B, _, _, _ = pred_depth.shape
loss = torch.tensor(0.0).cuda()
valid_planes_num = 0
for i in range(B):
self.fx = focal_length[i] if focal_length is not None else 256.0
self.fy = focal_length[i] if focal_length is not None else 256.0
pred_depth_i = pred_depth[i, :]
mask_i = mask[i, :][None, :, :]
unique_planes = torch.unique(mask_i)
planes = [mask_i == m for m in unique_planes if m != 0] #[x, 1, h, w] x is the planes number
if len(planes) == 0:
continue
            mask_planes = torch.cat(planes, dim=0)  # [x, 1, h, w]; stack would add an extra dim
pw_groups_pred, mask_valid = self.select_points_groups(pred_depth_i[None, :, :, :], mask_planes) # [x, N, 3(x,y,z), 3(p1,p2,p3)]
            for j in range(len(planes)):
mask_valid_j = mask_valid[j, :]
pw_groups_pred_j = pw_groups_pred[j, :]
loss_tmp, valid_angles = self.constrain_a_plane_loss(pw_groups_pred_j, mask_valid_j)
valid_planes_num += valid_angles
loss += loss_tmp
loss /= (valid_planes_num + 1e-8)
return loss
if __name__ == '__main__':
    import cv2
    vnl_loss = PWNPlanesLoss(500.0, 500.0, (385, 513), xyz_mode='uvd')
    pred_depth = torch.rand([2, 1, 385, 513]).cuda()
    gt_depth = torch.rand([2, 1, 385, 513]).cuda()
    gt_depth[:, :, 3:20, 40:60] = 0
    mask_kp1 = pred_depth > 0.9
    mask_kp2 = pred_depth < 0.5
    mask = 1 * mask_kp1 + 2 * mask_kp2  # plane ids: 0 = background, 1 and 2 = two planes
    #gt_depth = cv2.imread('/hardware/yifanliu/SUNRGBD/sunrgbd-meta-data/sunrgbd_test_depth/2.png', -1)
    #gt_depth = gt_depth[None, :, :, None]
    #pred_depth = gt_depth[:, :, ::-1, :]
    loss = vnl_loss(gt_depth, pred_depth, mask)
    print(loss)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import numpy as np
import torch.nn as nn
def init_image_coor(height, width):
x_row = np.arange(0, width)
x = np.tile(x_row, (height, 1))
x = x[np.newaxis, :, :]
x = x.astype(np.float32)
x = torch.from_numpy(x.copy()).cuda()
u_u0 = x - width/2.0
y_col = np.arange(0, height) # y_col = np.arange(0, height)
y = np.tile(y_col, (width, 1)).T
y = y[np.newaxis, :, :]
y = y.astype(np.float32)
y = torch.from_numpy(y.copy()).cuda()
v_v0 = y - height/2.0
return u_u0, v_v0
def depth_to_xyz(depth, focal_length):
b, c, h, w = depth.shape
u_u0, v_v0 = init_image_coor(h, w)
x = u_u0 * depth / focal_length
y = v_v0 * depth / focal_length
z = depth
pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1) # [b, h, w, c]
return pw
def get_surface_normal(xyz, patch_size=5):
# xyz: [1, h, w, 3]
x, y, z = torch.unbind(xyz, dim=3)
x = torch.unsqueeze(x, 0)
y = torch.unsqueeze(y, 0)
z = torch.unsqueeze(z, 0)
xx = x * x
yy = y * y
zz = z * z
xy = x * y
xz = x * z
yz = y * z
patch_weight = torch.ones((1, 1, patch_size, patch_size), requires_grad=False).cuda()
xx_patch = nn.functional.conv2d(xx, weight=patch_weight, padding=int(patch_size / 2))
yy_patch = nn.functional.conv2d(yy, weight=patch_weight, padding=int(patch_size / 2))
zz_patch = nn.functional.conv2d(zz, weight=patch_weight, padding=int(patch_size / 2))
xy_patch = nn.functional.conv2d(xy, weight=patch_weight, padding=int(patch_size / 2))
xz_patch = nn.functional.conv2d(xz, weight=patch_weight, padding=int(patch_size / 2))
yz_patch = nn.functional.conv2d(yz, weight=patch_weight, padding=int(patch_size / 2))
ATA = torch.stack([xx_patch, xy_patch, xz_patch, xy_patch, yy_patch, yz_patch, xz_patch, yz_patch, zz_patch],
dim=4)
ATA = torch.squeeze(ATA)
ATA = torch.reshape(ATA, (ATA.size(0), ATA.size(1), 3, 3))
eps_identity = 1e-6 * torch.eye(3, device=ATA.device, dtype=ATA.dtype)[None, None, :, :].repeat([ATA.size(0), ATA.size(1), 1, 1])
ATA = ATA + eps_identity
x_patch = nn.functional.conv2d(x, weight=patch_weight, padding=int(patch_size / 2))
y_patch = nn.functional.conv2d(y, weight=patch_weight, padding=int(patch_size / 2))
z_patch = nn.functional.conv2d(z, weight=patch_weight, padding=int(patch_size / 2))
AT1 = torch.stack([x_patch, y_patch, z_patch], dim=4)
AT1 = torch.squeeze(AT1)
AT1 = torch.unsqueeze(AT1, 3)
patch_num = 4
patch_x = int(AT1.size(1) / patch_num)
patch_y = int(AT1.size(0) / patch_num)
n_img = torch.randn(AT1.shape).cuda()
overlap = patch_size // 2 + 1
for x in range(int(patch_num)):
for y in range(int(patch_num)):
left_flg = 0 if x == 0 else 1
right_flg = 0 if x == patch_num -1 else 1
top_flg = 0 if y == 0 else 1
btm_flg = 0 if y == patch_num - 1 else 1
at1 = AT1[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
ata = ATA[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
# torch.solve is the pre-1.9 PyTorch API; newer releases use torch.linalg.solve(ata, at1)
n_img_tmp, _ = torch.solve(at1, ata)
n_img_tmp_select = n_img_tmp[top_flg * overlap:patch_y + top_flg * overlap, left_flg * overlap:patch_x + left_flg * overlap, :, :]
n_img[y * patch_y:y * patch_y + patch_y, x * patch_x:x * patch_x + patch_x, :, :] = n_img_tmp_select
n_img_L2 = torch.sqrt(torch.sum(n_img ** 2, dim=2, keepdim=True))
n_img_norm = n_img / n_img_L2
# re-orient normals consistently
orient_mask = torch.sum(torch.squeeze(n_img_norm) * torch.squeeze(xyz), dim=2) > 0
n_img_norm[orient_mask] *= -1
return n_img_norm
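# How the solve above works: for every pixel, the plane a*x + b*y + c*z = 1 is
# fitted to the 3D points in a patch_size x patch_size window by solving the
# normal equations (A^T A) n = A^T 1, where each row of A is one (x, y, z)
# point. The box-filter convolutions accumulate the entries of A^T A and A^T 1
# for all pixels in parallel; the image is then processed in patch_num x
# patch_num overlapping tiles purely to bound the memory of the batched solve,
# and the randn initialisation of n_img is a placeholder every tile overwrites.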
def get_surface_normalv2(xyz, patch_size=5):
"""
xyz: xyz coordinates
patch: [p1, p2, p3,
p4, p5, p6,
p7, p8, p9]
surface_normal = [(p9-p1) x (p3-p7)] + [(p6-p4) - (p8-p2)]
return: normal [h, w, 3, b]
"""
b, h, w, c = xyz.shape
half_patch = patch_size // 2
xyz_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1, c), dtype=xyz.dtype, device=xyz.device)
xyz_pad[:, half_patch:-half_patch, half_patch:-half_patch, :] = xyz
# xyz_left_top = xyz_pad[:, :h, :w, :] # p1
# xyz_right_bottom = xyz_pad[:, -h:, -w:, :]# p9
# xyz_left_bottom = xyz_pad[:, -h:, :w, :] # p7
# xyz_right_top = xyz_pad[:, :h, -w:, :] # p3
# xyz_cross1 = xyz_left_top - xyz_right_bottom # p1p9
# xyz_cross2 = xyz_left_bottom - xyz_right_top # p7p3
xyz_left = xyz_pad[:, half_patch:half_patch + h, :w, :] # p4
xyz_right = xyz_pad[:, half_patch:half_patch + h, -w:, :] # p6
xyz_top = xyz_pad[:, :h, half_patch:half_patch + w, :] # p2
xyz_bottom = xyz_pad[:, -h:, half_patch:half_patch + w, :] # p8
xyz_horizon = xyz_left - xyz_right # p4p6
xyz_vertical = xyz_top - xyz_bottom # p2p8
xyz_left_in = xyz_pad[:, half_patch:half_patch + h, 1:w+1, :] # p4
xyz_right_in = xyz_pad[:, half_patch:half_patch + h, patch_size-1:patch_size-1+w, :] # p6
xyz_top_in = xyz_pad[:, 1:h+1, half_patch:half_patch + w, :] # p2
xyz_bottom_in = xyz_pad[:, patch_size-1:patch_size-1+h, half_patch:half_patch + w, :] # p8
xyz_horizon_in = xyz_left_in - xyz_right_in # p4p6
xyz_vertical_in = xyz_top_in - xyz_bottom_in # p2p8
n_img_1 = torch.cross(xyz_horizon_in, xyz_vertical_in, dim=3)
n_img_2 = torch.cross(xyz_horizon, xyz_vertical, dim=3)
# re-orient normals consistently
orient_mask = torch.sum(n_img_1 * xyz, dim=3) > 0
n_img_1[orient_mask] *= -1
orient_mask = torch.sum(n_img_2 * xyz, dim=3) > 0
n_img_2[orient_mask] *= -1
n_img1_L2 = torch.sqrt(torch.sum(n_img_1 ** 2, dim=3, keepdim=True))
n_img1_norm = n_img_1 / (n_img1_L2 + 1e-8)
n_img2_L2 = torch.sqrt(torch.sum(n_img_2 ** 2, dim=3, keepdim=True))
n_img2_norm = n_img_2 / (n_img2_L2 + 1e-8)
# average 2 norms
n_img_aver = n_img1_norm + n_img2_norm
n_img_aver_L2 = torch.sqrt(torch.sum(n_img_aver ** 2, dim=3, keepdim=True))
n_img_aver_norm = n_img_aver / (n_img_aver_L2 + 1e-8)
# re-orient normals consistently
orient_mask = torch.sum(n_img_aver_norm * xyz, dim=3) > 0
n_img_aver_norm[orient_mask] *= -1
n_img_aver_norm_out = n_img_aver_norm.permute((1, 2, 3, 0)) # [h, w, c, b]
# a = torch.sum(n_img1_norm_out*n_img2_norm_out, dim=2).cpu().numpy().squeeze()
# plt.imshow(np.abs(a), cmap='rainbow')
# plt.show()
return n_img_aver_norm_out  # alternatively: n_img1_norm.permute((1, 2, 3, 0))
def surface_normal_from_depth(depth, focal_length, valid_mask=None):
# para depth: depth map, [b, c, h, w]
b, c, h, w = depth.shape
focal_length = focal_length[:, None, None, None]
depth_filter = nn.functional.avg_pool2d(depth, kernel_size=3, stride=1, padding=1)
depth_filter = nn.functional.avg_pool2d(depth_filter, kernel_size=3, stride=1, padding=1)
xyz = depth_to_xyz(depth_filter, focal_length)
sn_batch = []
for i in range(b):
xyz_i = xyz[i, :][None, :, :, :]
normal = get_surface_normalv2(xyz_i)
sn_batch.append(normal)
sn_batch = torch.cat(sn_batch, dim=3).permute((3, 2, 0, 1)) # [b, c, h, w]
mask_invalid = (~valid_mask).repeat(1, 3, 1, 1)
sn_batch[mask_invalid] = 0.0
return sn_batch
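# Minimal usage sketch (assumes a CUDA device; shapes and values are arbitrary):
#
#   depth = torch.rand(2, 1, 32, 32).cuda() + 0.5    # [b, 1, h, w], positive depths
#   focal = torch.tensor([500.0, 500.0]).cuda()      # one focal length per sample
#   normals = surface_normal_from_depth(depth, focal, valid_mask=depth > 0)
#   # normals: [2, 3, 32, 32] unit vectors, zeroed wherever the mask is invalid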
def vis_normal(normal):
"""
Visualize surface normal. Transfer surface normal value from [-1, 1] to [0, 255]
@para normal: surface normal, [h, w, 3], numpy.array
"""
n_img_L2 = np.sqrt(np.sum(normal ** 2, axis=2, keepdims=True))
n_img_norm = normal / (n_img_L2 + 1e-8)
normal_vis = n_img_norm * 127
normal_vis += 128
normal_vis = normal_vis.astype(np.uint8)
return normal_vis
def vis_normal2(normals):
'''
Map normal components from [-1, 1] into [0, 1] for display: x and y are shifted and scaled, z is folded to its absolute value, and zero-length vectors are blacked out. Modifies `normals` in place.
'''
x = normals[:, :, 0] # horizontal; pos right
y = normals[:, :, 1] # depth; pos far
z = normals[:, :, 2] # vertical; pos up
backfacing = (z > 0)
norm = np.sqrt(np.sum(normals**2, axis=2))
zero = (norm < 1e-5)
x += 1.0; x *= 0.5
y += 1.0; y *= 0.5
z = np.abs(z)
x[zero] = 0.0
y[zero] = 0.0
z[zero] = 0.0
normals[:, :, 0] = x # horizontal; pos right
normals[:, :, 1] = y # depth; pos far
normals[:, :, 2] = z # vertical; pos up
return normals
if __name__ == '__main__':
import cv2, os
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
#!/usr/bin/env python
# coding: utf-8
"""
https://github.com/CSAILVision/semantic-segmentation-pytorch
"""
import os
import sys
import torch
import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
from lib.configs.config import cfg
try:
from urllib import urlretrieve
except ImportError:
from urllib.request import urlretrieve
__all__ = ['resnext101_32x8d']
model_urls = {
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
#'resnext101': 'http://sceneparsing.csail.mit.edu/model/pretrained_resnet/resnext101-imagenet.pth'
}
def conv3x3(in_planes, out_planes, stride=1, groups=1, dilation=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=dilation, groups=groups, bias=False, dilation=dilation)
def conv1x1(in_planes, out_planes, stride=1):
"""1x1 convolution"""
return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(BasicBlock, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
if groups != 1 or base_width != 64:
raise ValueError('BasicBlock only supports groups=1 and base_width=64')
if dilation > 1:
raise NotImplementedError("Dilation > 1 not supported in BasicBlock")
# Both self.conv1 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = norm_layer(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = norm_layer(planes)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class Bottleneck(nn.Module):
# Bottleneck in torchvision places the stride for downsampling at 3x3 convolution(self.conv2)
# while original implementation places the stride at the first 1x1 convolution(self.conv1)
# according to "Deep residual learning for image recognition"https://arxiv.org/abs/1512.03385.
# This variant is also known as ResNet V1.5 and improves accuracy according to
# https://ngc.nvidia.com/catalog/model-scripts/nvidia:resnet_50_v1_5_for_pytorch.
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,
base_width=64, dilation=1, norm_layer=None):
super(Bottleneck, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
width = int(planes * (base_width / 64.)) * groups
# Both self.conv2 and self.downsample layers downsample the input when stride != 1
self.conv1 = conv1x1(inplanes, width)
self.bn1 = norm_layer(width)
self.conv2 = conv3x3(width, width, stride, groups, dilation)
self.bn2 = norm_layer(width)
self.conv3 = conv1x1(width, planes * self.expansion)
self.bn3 = norm_layer(planes * self.expansion)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
identity = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
identity = self.downsample(x)
out += identity
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
groups=1, width_per_group=64, replace_stride_with_dilation=None,
norm_layer=None):
super(ResNet, self).__init__()
if norm_layer is None:
norm_layer = nn.BatchNorm2d
self._norm_layer = norm_layer
self.inplanes = 64
self.dilation = 1
if replace_stride_with_dilation is None:
# each element in the tuple indicates if we should replace
# the 2x2 stride with a dilated convolution instead
replace_stride_with_dilation = [False, False, False]
if len(replace_stride_with_dilation) != 3:
raise ValueError("replace_stride_with_dilation should be None "
"or a 3-element tuple, got {}".format(replace_stride_with_dilation))
self.groups = groups
self.base_width = width_per_group
self.conv1 = nn.Conv2d(3, self.inplanes, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = norm_layer(self.inplanes)
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
dilate=replace_stride_with_dilation[0])
self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
dilate=replace_stride_with_dilation[1])
self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
dilate=replace_stride_with_dilation[2])
#self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
# Zero-initialize the last BN in each residual branch,
# so that the residual branch starts with zeros, and each residual block behaves like an identity.
# This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
if zero_init_residual:
for m in self.modules():
if isinstance(m, Bottleneck):
nn.init.constant_(m.bn3.weight, 0)
elif isinstance(m, BasicBlock):
nn.init.constant_(m.bn2.weight, 0)
def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
norm_layer = self._norm_layer
downsample = None
previous_dilation = self.dilation
if dilate:
self.dilation *= stride
stride = 1
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
conv1x1(self.inplanes, planes * block.expansion, stride),
norm_layer(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
self.base_width, previous_dilation, norm_layer))
self.inplanes = planes * block.expansion
for _ in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=self.groups,
base_width=self.base_width, dilation=self.dilation,
norm_layer=norm_layer))
return nn.Sequential(*layers)
def _forward_impl(self, x):
# See note [TorchScript super()]
features = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
#x = self.avgpool(x)
#x = torch.flatten(x, 1)
#x = self.fc(x)
return features
def forward(self, x):
return self._forward_impl(x)
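# Note: unlike the torchvision classifier, this backbone returns the four
# intermediate feature maps (strides 4, 8, 16 and 32) rather than class logits;
# the commented-out avgpool/fc lines above mark where the classification head
# was removed so a decoder can consume the feature pyramid.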
def resnext101_32x8d(pretrained=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnext101_32x8d'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet152(pretrained=True)
#pretrained_model = gcv.models.resnet152(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
if __name__ == '__main__':
import torch
model = resnext101_32x8d(True).cuda()
rgb = torch.rand((2, 3, 256, 256)).cuda()
out = model(rgb)
print(len(out))
"""
def load_url(url, model_dir='./pretrained', map_location=None):
if not os.path.exists(model_dir):
os.makedirs(model_dir)
filename = url.split('/')[-1]
cached_file = os.path.join(model_dir, filename)
if not os.path.exists(cached_file):
sys.stderr.write('Downloading: "{}" to {}\n'.format(url, cached_file))
urlretrieve(url, cached_file)
return torch.load(cached_file, map_location=map_location)
def conv3x3(in_planes, out_planes, stride=1):
"3x3 convolution with padding"
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class GroupBottleneck(nn.Module):
expansion = 2
def __init__(self, inplanes, planes, stride=1, groups=1, downsample=None):
super(GroupBottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = nn.BatchNorm2d(planes)
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, groups=groups, bias=False)
self.bn2 = nn.BatchNorm2d(planes)
self.conv3 = nn.Conv2d(planes, planes * 2, kernel_size=1, bias=False)
self.bn3 = nn.BatchNorm2d(planes * 2)
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNeXt(nn.Module):
def __init__(self, block, layers, groups=32, num_classes=1000):
self.inplanes = 128
super(ResNeXt, self).__init__()
self.conv1 = conv3x3(3, 64, stride=2)
self.bn1 = nn.BatchNorm2d(64)
self.relu1 = nn.ReLU(inplace=True)
self.conv2 = conv3x3(64, 64)
self.bn2 = nn.BatchNorm2d(64)
self.relu2 = nn.ReLU(inplace=True)
self.conv3 = conv3x3(64, 128)
self.bn3 = nn.BatchNorm2d(128)
self.relu3 = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 128, layers[0], groups=groups)
self.layer2 = self._make_layer(block, 256, layers[1], stride=2, groups=groups)
self.layer3 = self._make_layer(block, 512, layers[2], stride=2, groups=groups)
self.layer4 = self._make_layer(block, 1024, layers[3], stride=2, groups=groups)
#self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(1024 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels // m.groups
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
def _make_layer(self, block, planes, blocks, stride=1, groups=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
nn.BatchNorm2d(planes * block.expansion),
)
layers = []
layers.append(block(self.inplanes, planes, stride, groups, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes, groups=groups))
return nn.Sequential(*layers)
def forward(self, x):
features = []
x = self.relu1(self.bn1(self.conv1(x)))
x = self.relu2(self.bn2(self.conv2(x)))
x = self.relu3(self.bn3(self.conv3(x)))
x = self.maxpool(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
return features
def resnext101(pretrained=True, **kwargs):
'''
Constructs a resnext-101 model.
#Args:
pretrained (bool): If True, returns a model pre-trained on Places
'''
model = ResNeXt(GroupBottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
model.load_state_dict(load_url(), strict=False)
#model.load_state_dict(torch.load('./pretrained/resnet101-imagenet.pth', map_location=None), strict=False)
return model
"""
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
import torch.nn.functional
from . import network_auxi as network
from lib.configs.config import cfg
from lib.utils.net_tools import *
from lib.models.PWN_planes import PWNPlanesLoss
from lib.models.PWN_edges import EdgeguidedNormalRegressionLoss
from lib.models.ranking_loss import EdgeguidedRankingLoss
from lib.models.ILNR_loss import MEADSTD_TANH_NORM_Loss
from lib.models.MSGIL_loss import MSGIL_NORM_Loss
class RelDepthModel(nn.Module):
def __init__(self):
super(RelDepthModel, self).__init__()
self.depth_model = DepthModel()
self.losses = ModelLoss()
def forward(self, data, is_train=True):
# Input data is a_real, predicted data is b_fake, groundtruth is b_real
self.inputs = data['rgb'].cuda()
self.logit, self.auxi = self.depth_model(self.inputs)
if is_train:
self.losses_dict = self.losses.criterion(self.logit, self.auxi, data)
else:
self.losses_dict = {'total_loss': torch.tensor(0.0, dtype=torch.float).cuda()}
return {'decoder': self.logit, 'auxi': self.auxi, 'losses': self.losses_dict}
def inference(self, data):
with torch.no_grad():
out = self.forward(data, is_train=False)
pred_depth = out['decoder']
pred_disp = out['auxi']
pred_depth_out = pred_depth
return {'pred_depth': pred_depth_out, 'pred_disp': pred_disp}
class ModelLoss(nn.Module):
def __init__(self):
super(ModelLoss, self).__init__()
################Loss for the main branch, i.e. on the depth map#################
# Geometry Loss
self.pn_plane = PWNPlanesLoss(focal_x=cfg.DATASET.FOCAL_X, focal_y=cfg.DATASET.FOCAL_Y,
input_size=cfg.DATASET.CROP_SIZE, sample_groups=5000, xyz_mode='xyz')
self.pn_edge = EdgeguidedNormalRegressionLoss(mask_value=-1e-8, max_threshold=10.1)
# self.surface_normal_loss = SurfaceNormalLoss()
# the scale can be adjusted
self.msg_normal_loss = MSGIL_NORM_Loss(scale=4, valid_threshold=-1e-8)
# Scale shift invariant. SSIMAEL_Loss is MIDAS loss. MEADSTD_TANH_NORM_Loss is our normalization loss.
self.meanstd_tanh_loss = MEADSTD_TANH_NORM_Loss(valid_threshold=-1e-8)
self.ranking_edge_loss = EdgeguidedRankingLoss(mask_value=-1e-8)
################Loss for the auxi branch, i.e. on the disp map#################
# the scale can be adjusted
self.msg_normal_auxiloss = MSGIL_NORM_Loss(scale=4, valid_threshold=-1e-8)
# Scale shift invariant. SSIMAEL_Loss is MIDAS loss. MEADSTD_TANH_NORM_Loss is our normalization loss.
self.meanstd_tanh_auxiloss = MEADSTD_TANH_NORM_Loss(valid_threshold=-1e-8)
self.ranking_edge_auxiloss = EdgeguidedRankingLoss(mask_value=-1e-8)
def criterion(self, pred_logit, auxi, data):
loss1 = self.decoder_loss(pred_logit, data)
loss2 = self.auxi_loss(auxi, data)
loss = {}
loss.update(loss1)
loss.update(loss2)
loss['total_loss'] = loss1['total_loss'] + loss2['total_loss']
return loss
def auxi_loss(self, auxi, data):
loss = {}
if 'disp' not in data:
return {'total_loss': torch.tensor(0.0).cuda()}
gt_disp = data['disp'].to(device=auxi.device)
mask_mid_quality = data['quality_flg'] >= 2
gt_disp_mid = gt_disp[mask_mid_quality]
auxi_mid = auxi[mask_mid_quality]
if '_ranking-edge-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['ranking-edge_auxiloss'] = self.ranking_edge_auxiloss(auxi, gt_disp, data['rgb'])
if '_msgil-normal-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['msg_normal_auxiloss'] = (self.msg_normal_auxiloss(auxi_mid, gt_disp_mid) * 0.5).float()
if '_meanstd-tanh-auxi_' in cfg.TRAIN.LOSS_MODE.lower():
loss['meanstd-tanh_auxiloss'] = self.meanstd_tanh_auxiloss(auxi_mid, gt_disp_mid)
total_loss = sum(loss.values())
loss['total_loss'] = total_loss * cfg.TRAIN.LOSS_AUXI_WEIGHT
return loss
def decoder_loss(self, pred_logit, data):
pred_depth = pred_logit
gt_depth = data['depth'].to(device=pred_depth.device)
# High-quality data, except webstereo data
mask_high_quality = data['quality_flg'] == 3
mask_mid_quality = data['quality_flg'] >= 2
# gt_depth_high = gt_depth[mask_high_quality]
# pred_depth_high = pred_depth[mask_high_quality]
gt_depth_mid = gt_depth[mask_mid_quality]
pred_depth_mid = pred_depth[mask_mid_quality]
#gt_depth_filter = gt_depth[data['mask_highquality']]
#pred_depth_filter = pred_depth[data['mask_highquality']]
#focal_length_filter = data['focal_length'][data['mask_highquality']]
# if gt_depth_high.ndim == 3:
# gt_depth_high = gt_depth_high[None, :, :, :]
# pred_depth_high = pred_depth_high[None, :, :, :]
if gt_depth_mid.ndim == 3:
gt_depth_mid = gt_depth_mid[None, :, :, :]
pred_depth_mid = pred_depth_mid[None, :, :, :]
loss = {}
if '_pairwise-normal-regress-edge_' in cfg.TRAIN.LOSS_MODE.lower() or \
'_pairwise-normal-regress-plane_' in cfg.TRAIN.LOSS_MODE.lower():
pred_ssinv = recover_scale_shift_depth(pred_depth, gt_depth, min_threshold=-1e-8, max_threshold=10.1)
else:
pred_ssinv = None
# Geometry Loss
if '_pairwise-normal-regress-plane_' in cfg.TRAIN.LOSS_MODE.lower():
focal_length = data['focal_length'] if 'focal_length' in data else None
loss['pairwise-normal-regress-plane_loss'] = self.pn_plane(gt_depth,
pred_ssinv,
data['planes'],
focal_length)
if '_pairwise-normal-regress-edge_' in cfg.TRAIN.LOSS_MODE.lower():
if mask_high_quality.sum():
loss['pairwise-normal-regress-edge_loss'] = self.pn_edge(pred_ssinv[mask_high_quality],
gt_depth[mask_high_quality],
data['rgb'][mask_high_quality],
focal_length=data['focal_length'][mask_high_quality])
else:
loss['pairwise-normal-regress-edge_loss'] = pred_ssinv.sum() * 0.
# Scale-shift Invariant Loss
if '_meanstd-tanh_' in cfg.TRAIN.LOSS_MODE.lower():
if mask_mid_quality.sum():
loss_ssi = self.meanstd_tanh_loss(pred_depth_mid, gt_depth_mid)
loss['meanstd-tanh_loss'] = loss_ssi
else:
loss['meanstd-tanh_loss'] = pred_depth.sum() * 0.
if '_ranking-edge_' in cfg.TRAIN.LOSS_MODE.lower():
loss['ranking-edge_loss'] = self.ranking_edge_loss(pred_depth, gt_depth, data['rgb'])
# Multi-scale Gradient Loss
if '_msgil-normal_' in cfg.TRAIN.LOSS_MODE.lower():
if mask_mid_quality.sum():
loss['msg_normal_loss'] = (self.msg_normal_loss(pred_depth_mid, gt_depth_mid) * 0.5).float()
else:
loss['msg_normal_loss'] = pred_depth.sum() * 0.
total_loss = sum(loss.values())
loss['total_loss'] = total_loss
return loss
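# Note: each term above is enabled by a substring match against
# cfg.TRAIN.LOSS_MODE and further gated by the per-sample quality flag
# (3 = high-quality GT, >= 2 = mid quality). The `pred.sum() * 0.` fallbacks
# return a zero that still depends on the prediction, so every loss branch
# keeps a gradient path even when a batch contains no qualifying samples.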
class ModelOptimizer(object):
def __init__(self, model):
super(ModelOptimizer, self).__init__()
encoder_params = []
encoder_params_names = []
decoder_params = []
decoder_params_names = []
nograd_param_names = []
for key, value in model.named_parameters():
if value.requires_grad:
if 'res' in key:
encoder_params.append(value)
encoder_params_names.append(key)
else:
decoder_params.append(value)
decoder_params_names.append(key)
else:
nograd_param_names.append(key)
lr_encoder = cfg.TRAIN.BASE_LR
lr_decoder = cfg.TRAIN.BASE_LR * cfg.TRAIN.SCALE_DECODER_LR
weight_decay = 0.0005
net_params = [
{'params': encoder_params,
'lr': lr_encoder,
'weight_decay': weight_decay},
{'params': decoder_params,
'lr': lr_decoder,
'weight_decay': weight_decay},
]
self.optimizer = torch.optim.SGD(net_params, momentum=0.9)
self.model = model
def optim(self, loss):
self.optimizer.zero_grad()
loss_all = loss['total_loss']
loss_all.backward()
torch.nn.utils.clip_grad_norm_(self.model.parameters(), 10)
self.optimizer.step()
class DepthModel(nn.Module):
def __init__(self):
super(DepthModel, self).__init__()
backbone = network.__name__.split('.')[-1] + '.' + cfg.MODEL.ENCODER
self.encoder_modules = get_func(backbone)()
self.decoder_modules = network.Decoder()
self.auxi_modules = network.AuxiNetV2()
def forward(self, x):
lateral_out = self.encoder_modules(x)
out_logit, auxi_input = self.decoder_modules(lateral_out)
out_auxi = self.auxi_modules(auxi_input)
return out_logit, out_auxi
def recover_scale_shift_depth(pred, gt, min_threshold=1e-8, max_threshold=1e8):
b, c, h, w = pred.shape
mask = (gt > min_threshold) & (gt < max_threshold) # [b, c, h, w]
EPS = 1e-6 * torch.eye(2, dtype=pred.dtype, device=pred.device)
scale_shift_batch = []
ones_img = torch.ones((1, h, w), dtype=pred.dtype, device=pred.device)
for i in range(b):
mask_i = mask[i, ...]
pred_valid_i = pred[i, ...][mask_i]
ones_i = ones_img[mask_i]
pred_valid_ones_i = torch.stack((pred_valid_i, ones_i), dim=0) # [c+1, n]
A_i = torch.matmul(pred_valid_ones_i, pred_valid_ones_i.permute(1, 0)) # [2, 2]
A_inverse = torch.inverse(A_i + EPS)
gt_i = gt[i, ...][mask_i]
B_i = torch.matmul(pred_valid_ones_i, gt_i)[:, None] # [2, 1]
scale_shift_i = torch.matmul(A_inverse, B_i) # [2, 1]
scale_shift_batch.append(scale_shift_i)
scale_shift_batch = torch.stack(scale_shift_batch, dim=0) # [b, 2, 1]
ones = torch.ones_like(pred)
pred_ones = torch.cat((pred, ones), dim=1) # [b, 2, h, w]
pred_scale_shift = torch.matmul(pred_ones.permute(0, 2, 3, 1).reshape(b, h * w, 2), scale_shift_batch) # [b, h*w, 1]
pred_scale_shift = pred_scale_shift.permute(0, 2, 1).reshape((b, c, h, w))
return pred_scale_shift
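# Note: this is the closed-form per-image solution of
# min_{s,t} sum_i (s * pred_i + t - gt_i)^2 over valid pixels, via the 2x2
# normal equations A^T A [s, t]^T = A^T gt with A = [pred, 1]; EPS regularises
# the inverse. Quick self-consistency sketch (assumes CUDA; values arbitrary):
#
#   gt = torch.rand(1, 1, 8, 8).cuda() + 0.1
#   pred = 3.0 * gt - 0.5                    # gt up to an affine transform
#   aligned = recover_scale_shift_depth(pred, gt)
#   assert torch.allclose(aligned, gt, atol=1e-3)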
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch.nn as nn
import torch.utils.model_zoo as model_zoo
import torchvision
from lib.configs.config import cfg
import torch.nn as NN
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
"""3x3 convolution with padding"""
return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
padding=1, bias=False)
class BasicBlock(nn.Module):
expansion = 1
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(BasicBlock, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class Bottleneck(nn.Module):
expansion = 4
def __init__(self, inplanes, planes, stride=1, downsample=None):
super(Bottleneck, self).__init__()
self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
self.bn1 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
padding=1, bias=False)
self.bn2 = NN.BatchNorm2d(planes) #NN.BatchNorm2d
self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
self.bn3 = NN.BatchNorm2d(planes * self.expansion) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.downsample = downsample
self.stride = stride
def forward(self, x):
residual = x
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out = self.relu(out)
out = self.conv3(out)
out = self.bn3(out)
if self.downsample is not None:
residual = self.downsample(x)
out += residual
out = self.relu(out)
return out
class ResNet(nn.Module):
def __init__(self, block, layers, num_classes=1000):
self.inplanes = 64
super(ResNet, self).__init__()
self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
bias=False)
self.bn1 = NN.BatchNorm2d(64) #NN.BatchNorm2d
self.relu = nn.ReLU(inplace=True)
self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
self.layer1 = self._make_layer(block, 64, layers[0])
self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
#self.avgpool = nn.AvgPool2d(7, stride=1)
#self.fc = nn.Linear(512 * block.expansion, num_classes)
for m in self.modules():
if isinstance(m, nn.Conv2d):
nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
elif isinstance(m, nn.BatchNorm2d):
nn.init.constant_(m.weight, 1)
nn.init.constant_(m.bias, 0)
def _make_layer(self, block, planes, blocks, stride=1):
downsample = None
if stride != 1 or self.inplanes != planes * block.expansion:
downsample = nn.Sequential(
nn.Conv2d(self.inplanes, planes * block.expansion,
kernel_size=1, stride=stride, bias=False),
NN.BatchNorm2d(planes * block.expansion), #NN.BatchNorm2d
)
layers = []
layers.append(block(self.inplanes, planes, stride, downsample))
self.inplanes = planes * block.expansion
for i in range(1, blocks):
layers.append(block(self.inplanes, planes))
return nn.Sequential(*layers)
def forward(self, x):
features = []
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
x = self.maxpool(x)
x = self.layer1(x)
features.append(x)
x = self.layer2(x)
features.append(x)
x = self.layer3(x)
features.append(x)
x = self.layer4(x)
features.append(x)
return features
def resnet18(pretrained=True, **kwargs):
"""Constructs a ResNet-18 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet18'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet18(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def resnet34(pretrained=True, **kwargs):
"""Constructs a ResNet-34 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet34'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet34(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def resnet50(pretrained=True, **kwargs):
"""Constructs a ResNet-50 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet50'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet50(pretrained=True)
#pretrained_model = gcv.models.resnet50(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def resnet101(pretrained=True, **kwargs):
"""Constructs a ResNet-101 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet101'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet101(pretrained=True)
#pretrained_model = gcv.models.resnet101(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
def resnet152(pretrained=True, **kwargs):
"""Constructs a ResNet-152 model.
Args:
pretrained (bool): If True, returns a model pre-trained on ImageNet
"""
model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
if pretrained:
pretrained_dict = model_zoo.load_url(model_urls['resnet152'], cfg.ROOT_DIR + '/' + cfg.MODEL.MODEL_REPOSITORY)
#pretrained_model = torchvision.models.resnet152(pretrained=True)
#pretrained_model = gcv.models.resnet152(pretrained=True)
#pretrained_dict = pretrained_model.state_dict()
model_dict = model.state_dict()
pretrained_dict = {k:v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
model.load_state_dict(model_dict)
return model
if __name__ == '__main__':
import torch
model = resnet50(True).cuda()
rgb = torch.rand((2, 3, 256, 256)).cuda()
out = model(rgb)
print(len(out))
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
from torch import nn
import numpy as np
import torch.nn.functional as F
from lib.models.Surface_normal import surface_normal_from_depth, vis_normal
"""
Sampling strategies: RS (Random Sampling) and EGS (Edge-Guided Sampling); IGS (Instance-Guided Sampling) is not implemented in this file
"""
###########
# RANDOM SAMPLING
# input:
# inputs[i,:], targets[i, :], masks[i, :], self.mask_value, self.point_pairs
# return:
# inputs_A, inputs_B, targets_A, targets_B, consistent_masks_A, consistent_masks_B
###########
def randomSamplingNormal(inputs, targets, masks, sample_num):
# find A-B point pairs from predictions
num_effect_pixels = torch.sum(masks)
shuffle_effect_pixels = torch.randperm(num_effect_pixels).cuda()
valid_inputs = inputs[:, masks]
valid_targes = targets[:, masks]
inputs_A = valid_inputs[:, shuffle_effect_pixels[0:sample_num*2:2]]
inputs_B = valid_inputs[:, shuffle_effect_pixels[1:sample_num*2:2]]
# find corresponding pairs from GT
targets_A = valid_targes[:, shuffle_effect_pixels[0:sample_num*2:2]]
targets_B = valid_targes[:, shuffle_effect_pixels[1:sample_num*2:2]]
if inputs_A.shape[1] != inputs_B.shape[1]:
num_min = min(targets_A.shape[1], targets_B.shape[1])
inputs_A = inputs_A[:, :num_min]
inputs_B = inputs_B[:, :num_min]
targets_A = targets_A[:, :num_min]
targets_B = targets_B[:, :num_min]
return inputs_A, inputs_B, targets_A, targets_B
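# Note: the shuffle above pairs pixels uniformly at random over the valid set
# (even shuffled positions become A, odd ones B); the final truncation only
# guards against an odd number of sampled pixels.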
###########
# EDGE-GUIDED SAMPLING
# input:
# inputs[i,:], targets[i, :], masks[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w
# return:
# inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B
###########
def ind2sub(idx, cols):
r = idx // cols  # integer division; plain `/` yields float row indices in recent PyTorch
c = idx - r * cols
return r, c
def sub2ind(r, c, cols):
idx = r * cols + c
return idx
def edgeGuidedSampling(inputs, targets, edges_img, thetas_img, masks, h, w):
# find edges
edges_max = edges_img.max()
edges_min = edges_img.min()
edges_mask = edges_img.ge(edges_max*0.1)
edges_loc = edges_mask.nonzero()
thetas_edge = torch.masked_select(thetas_img, edges_mask)
minlen = thetas_edge.size()[0]
# find anchor points (i.e, edge points)
sample_num = minlen
index_anchors = torch.randint(0, minlen, (sample_num,), dtype=torch.long).cuda()
theta_anchors = torch.gather(thetas_edge, 0, index_anchors)
row_anchors, col_anchors = ind2sub(edges_loc[index_anchors].squeeze(1), w)
## compute the coordinates of the 4 points; signed distances are sampled from [3, 20)
distance_matrix = torch.randint(3, 20, (4,sample_num)).cuda()
pos_or_neg = torch.ones(4,sample_num).cuda()
pos_or_neg[:2,:] = -pos_or_neg[:2,:]
distance_matrix = distance_matrix.float() * pos_or_neg
col = col_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.double() * torch.cos(theta_anchors).unsqueeze(0)).long()
row = row_anchors.unsqueeze(0).expand(4, sample_num).long() + torch.round(distance_matrix.double() * torch.sin(theta_anchors).unsqueeze(0)).long()
# clamp to the valid pixel range: 0 <= c <= w-1, 0 <= r <= h-1
col[col<0] = 0
col[col>w-1] = w-1
row[row<0] = 0
row[row>h-1] = h-1
# a-b, b-c, c-d
a = sub2ind(row[0,:], col[0,:], w)
b = sub2ind(row[1,:], col[1,:], w)
c = sub2ind(row[2,:], col[2,:], w)
d = sub2ind(row[3,:], col[3,:], w)
A = torch.cat((a,b,c), 0)
B = torch.cat((b,c,d), 0)
inputs_A = inputs[:, A]
inputs_B = inputs[:, B]
targets_A = targets[:, A]
targets_B = targets[:, B]
masks_A = torch.gather(masks, 0, A.long())
masks_B = torch.gather(masks, 0, B.long())
return inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num, row, col
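# Note: edgeGuidedSampling anchors on pixels whose edge response exceeds 10% of
# the maximum, then steps away from each anchor along the local gradient
# direction theta by random signed distances, giving four roughly collinear
# points a, b, c, d that straddle the edge; the returned A/B pairs (a,b), (b,c)
# and (c,d) therefore compare geometry across and on either side of the edge.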
class EdgeguidedNormalRegressionLoss(nn.Module):
def __init__(self, point_pairs=10000, cos_theta1=0.3, cos_theta2=0.95, cos_theta3=0.5, cos_theta4=0.86, mask_value=-1e-8, max_threshold=10.1):
super(EdgeguidedNormalRegressionLoss, self).__init__()
self.point_pairs = point_pairs # number of point pairs
self.mask_value = mask_value
self.max_threshold = max_threshold
self.cos_theta1 = cos_theta1 # default 0.3, i.e. ~72 degrees
self.cos_theta2 = cos_theta2 # default 0.95, i.e. ~18 degrees
self.cos_theta3 = cos_theta3 # default 0.5, i.e. 60 degrees
self.cos_theta4 = cos_theta4 # default 0.86, i.e. ~31 degrees
self.kernel = torch.tensor(np.array([[1, 1, 1], [1, 1, 1], [1, 1, 1]], dtype=np.float32), requires_grad=False)[None, None, :, :].cuda()
def scale_shift_pred_depth(self, pred, gt):
b, c, h, w = pred.shape
mask = (gt > self.mask_value) & (gt < self.max_threshold) # [b, c, h, w]
EPS = 1e-6 * torch.eye(2, dtype=pred.dtype, device=pred.device)
scale_shift_batch = []
ones_img = torch.ones((1, h, w), dtype=pred.dtype, device=pred.device)
for i in range(b):
mask_i = mask[i, ...]
pred_valid_i = pred[i, ...][mask_i]
ones_i = ones_img[mask_i]
pred_valid_ones_i = torch.stack((pred_valid_i, ones_i), dim=0) # [c+1, n]
A_i = torch.matmul(pred_valid_ones_i, pred_valid_ones_i.permute(1, 0)) # [2, 2]
A_inverse = torch.inverse(A_i + EPS)
gt_i = gt[i, ...][mask_i]
B_i = torch.matmul(pred_valid_ones_i, gt_i)[:, None] # [2, 1]
scale_shift_i = torch.matmul(A_inverse, B_i) # [2, 1]
scale_shift_batch.append(scale_shift_i)
scale_shift_batch = torch.stack(scale_shift_batch, dim=0) # [b, 2, 1]
ones = torch.ones_like(pred)
pred_ones = torch.cat((pred, ones), dim=1) # [b, 2, h, w]
pred_scale_shift = torch.matmul(pred_ones.permute(0, 2, 3, 1).reshape(b, h * w, 2), scale_shift_batch) # [b, h*w, 1]
pred_scale_shift = pred_scale_shift.permute(0, 2, 1).reshape((b, c, h, w))
return pred_scale_shift
def getEdge(self, images):
n,c,h,w = images.size()
a = torch.Tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]).cuda().view((1,1,3,3)).repeat(1, 1, 1, 1)
b = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).cuda().view((1,1,3,3)).repeat(1, 1, 1, 1)
if c == 3:
gradient_x = F.conv2d(images[:,0,:,:].unsqueeze(1), a)
gradient_y = F.conv2d(images[:,0,:,:].unsqueeze(1), b)
else:
gradient_x = F.conv2d(images, a)
gradient_y = F.conv2d(images, b)
edges = torch.sqrt(torch.pow(gradient_x,2)+ torch.pow(gradient_y,2))
edges = F.pad(edges, (1,1,1,1), "constant", 0)
thetas = torch.atan2(gradient_y, gradient_x)
thetas = F.pad(thetas, (1,1,1,1), "constant", 0)
return edges, thetas
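# Note: getEdge is a plain Sobel operator: `a` and `b` are the horizontal and
# vertical Sobel kernels, the edge magnitude is sqrt(gx^2 + gy^2), and the
# orientation thetas = atan2(gy, gx) is what edgeGuidedSampling steps along;
# the one-pixel zero padding restores the input resolution.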
def getNormalEdge(self, normals):
n,c,h,w = normals.size()
a = torch.Tensor([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]).cuda().view((1,1,3,3)).repeat(3, 1, 1, 1)
b = torch.Tensor([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]).cuda().view((1,1,3,3)).repeat(3, 1, 1, 1)
gradient_x = torch.abs(F.conv2d(normals, a, groups=c))
gradient_y = torch.abs(F.conv2d(normals, b, groups=c))
gradient_x = gradient_x.mean(dim=1, keepdim=True)
gradient_y = gradient_y.mean(dim=1, keepdim=True)
edges = torch.sqrt(torch.pow(gradient_x,2)+ torch.pow(gradient_y,2))
edges = F.pad(edges, (1,1,1,1), "constant", 0)
thetas = torch.atan2(gradient_y, gradient_x)
thetas = F.pad(thetas, (1,1,1,1), "constant", 0)
return edges, thetas
def forward(self, pred_depths, gt_depths, images, focal_length):
"""
inputs and targets: surface normal image
images: rgb images
"""
masks = gt_depths > self.mask_value
#pred_depths_ss = self.scale_shift_pred_depth(pred_depths, gt_depths)
inputs = surface_normal_from_depth(pred_depths, focal_length, valid_mask=masks)
targets = surface_normal_from_depth(gt_depths, focal_length, valid_mask=masks)
# find edges from RGB
edges_img, thetas_img = self.getEdge(images)
# find edges from normals
edges_normal, thetas_normal = self.getNormalEdge(targets)
mask_img_border = torch.ones_like(edges_normal) # normals on the borders
mask_img_border[:, :, 5:-5, 5:-5] = 0
edges_normal[mask_img_border.bool()] = 0
# find edges from depth
edges_depth, _ = self.getEdge(gt_depths)
edges_depth_mask = edges_depth.ge(edges_depth.max() * 0.1)
edges_mask_dilate = torch.clamp(torch.nn.functional.conv2d(edges_depth_mask.float(), self.kernel, padding=(1, 1)), 0,
1).bool()
edges_normal[edges_mask_dilate] = 0
edges_img[edges_mask_dilate] = 0
#=============================
n,c,h,w = targets.size()
inputs = inputs.contiguous().view(n, c, -1).double()
targets = targets.contiguous().view(n, c, -1).double()
masks = masks.contiguous().view(n, -1)
edges_img = edges_img.contiguous().view(n, -1).double()
thetas_img = thetas_img.contiguous().view(n, -1).double()
edges_normal = edges_normal.view(n, -1).double()
thetas_normal = thetas_normal.view(n, -1).double()
# initialization
loss = torch.DoubleTensor([0.0]).cuda()
for i in range(n):
# Edge-Guided sampling
inputs_A, inputs_B, targets_A, targets_B, masks_A, masks_B, sample_num, row_img, col_img = edgeGuidedSampling(inputs[i,:], targets[i, :], edges_img[i], thetas_img[i], masks[i, :], h, w)
normal_inputs_A, normal_inputs_B, normal_targets_A, normal_targets_B, normal_masks_A, normal_masks_B, normal_sample_num, row_normal, col_normal = edgeGuidedSampling(inputs[i,:], targets[i, :], edges_normal[i], thetas_normal[i], masks[i, :], h, w)
# Combine EGS + EGNS
inputs_A = torch.cat((inputs_A, normal_inputs_A), 1)
inputs_B = torch.cat((inputs_B, normal_inputs_B), 1)
targets_A = torch.cat((targets_A, normal_targets_A), 1)
targets_B = torch.cat((targets_B, normal_targets_B), 1)
masks_A = torch.cat((masks_A, normal_masks_A), 0)
masks_B = torch.cat((masks_B, normal_masks_B), 0)
# consider forward-backward consistency checking, i.e, only compute losses of point pairs with valid GT
consistency_mask = masks_A & masks_B
#GT ordinal relationship
target_cos = torch.abs(torch.sum(targets_A * targets_B, dim=0))
input_cos = torch.abs(torch.sum(inputs_A * inputs_B, dim=0))
# ranking regression
#loss += torch.mean(torch.abs(target_cos[consistency_mask] - input_cos[consistency_mask]))
# Ranking for samples
mask_cos75 = target_cos < self.cos_theta1
mask_cos10 = target_cos > self.cos_theta2
# Regression for samples
loss += torch.sum(torch.abs(target_cos[mask_cos75 & consistency_mask] - input_cos[mask_cos75 & consistency_mask])) / (torch.sum(mask_cos75 & consistency_mask)+1e-8)
loss += torch.sum(torch.abs(target_cos[mask_cos10 & consistency_mask] - input_cos[mask_cos10 & consistency_mask])) / (torch.sum(mask_cos10 & consistency_mask)+1e-8)
# Random Sampling regression
random_sample_num = torch.sum(mask_cos10 & consistency_mask) + torch.sum(torch.sum(mask_cos75 & consistency_mask))
random_inputs_A, random_inputs_B, random_targets_A, random_targets_B = randomSamplingNormal(inputs[i,:], targets[i, :], masks[i, :], random_sample_num)
#GT ordinal relationship
random_target_cos = torch.abs(torch.sum(random_targets_A * random_targets_B, dim=0))
random_input_cos = torch.abs(torch.sum(random_inputs_A * random_inputs_B, dim=0))
loss += torch.sum(torch.abs(random_target_cos - random_input_cos)) / (random_target_cos.shape[0] + 1e-8)
if loss[0] != 0:
return loss[0].float() / n
else:
return pred_depths.sum() * 0.0
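# Note: regression above is applied only where the GT normal pairs are nearly
# parallel (cos > cos_theta2) or strongly non-parallel (cos < cos_theta1);
# ambiguous middle angles are skipped, and an equally sized random-pair term
# regularises the edge-guided samples so the loss is not dominated by edges.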
if __name__ == '__main__':
import cv2
rank_loss = EdgeguidedNormalRegressionLoss()
pred_depth = np.random.randn(2, 1, 480, 640)
gt_depth = np.random.randn(2, 1, 480, 640)
# gt_depth = cv2.imread('/hardware/yifanliu/SUNRGBD/sunrgbd-meta-data/sunrgbd_test_depth/2.png', -1)
# gt_depth = gt_depth[None, :, :, None]
# pred_depth = gt_depth[:, :, ::-1, :]
gt_depth = torch.tensor(np.asarray(gt_depth, np.float32)).cuda()
pred_depth = torch.tensor(np.asarray(pred_depth, np.float32)).cuda()
images = torch.rand(2, 3, 480, 640).cuda()  # dummy RGB batch (the loss samples along image edges)
focal_length = torch.tensor([500.0, 500.0]).cuda()  # dummy per-sample focal lengths
loss = rank_loss(pred_depth, gt_depth, images, focal_length)
print(loss)
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
#!/usr/bin/env python2
# coding: utf-8
"""
This network structure follows Ke Xian's work, "Structure-guided Ranking Loss for Single Image Depth Prediction".
"""
import torch
import torch.nn as nn
import torch.nn.init as init
import torch.nn.functional as F
from lib.configs.config import cfg
from lib.models import Resnet, Resnext_torch
def resnet18_stride32():
return DepthNet(backbone='resnet', depth=18, upfactors=[2, 2, 2, 2])
def resnet34_stride32():
return DepthNet(backbone='resnet', depth=34, upfactors=[2, 2, 2, 2])
def resnet50_stride32():
return DepthNet(backbone='resnet', depth=50, upfactors=[2, 2, 2, 2])
def resnet101_stride32():
return DepthNet(backbone='resnet', depth=101, upfactors=[2, 2, 2, 2])
def resnet152_stride32():
return DepthNet(backbone='resnet', depth=152, upfactors=[2, 2, 2, 2])
def resnext101_stride32x8d():
return DepthNet(backbone='resnext101_32x8d', depth=101, upfactors=[2, 2, 2, 2])
def mobilenetv2():
return DepthNet(backbone='mobilenetv2', depth=0, upfactors=[2, 2, 2, 2])
class AuxiBlock(nn.Module):
def __init__(self, dim_in, dim_out):
super().__init__()
self.dim_in = dim_in
self.dim_out = dim_out
self.conv1 = nn.Conv2d(self.dim_in, self.dim_out, 1, stride=1, padding=0, bias=False)
self.conv2 = nn.Conv2d(self.dim_out, self.dim_out, 3, stride=1, padding=1, dilation=1, bias=True)
self.bn1 = nn.BatchNorm2d(self.dim_out, momentum=0.5)
self.bn2 = nn.BatchNorm2d(self.dim_out, momentum=0.5)
self.relu = nn.ReLU(inplace=True)
def forward(self, top, lateral):
if lateral.shape[2] != top.shape[2]:
h, w = lateral.size(2), lateral.size(3)
top = F.interpolate(input=top, size=(h, w), mode='bilinear', align_corners=True)
out = torch.cat((lateral, top), dim=1)
out = self.relu(self.bn1(self.conv1(out)))
out = self.relu(self.bn2(self.conv2(out)))
return out
class AuxiNetV2(nn.Module):
def __init__(self):
super().__init__()
self.inchannels = cfg.MODEL.RESNET_BOTTLENECK_DIM[1:] # [256, 512, 1024, 2048]
self.midchannels = cfg.MODEL.LATERAL_OUT[::-1] # [256, 256, 256, 512]
self.auxi_block1 = AuxiBlock(self.midchannels[2]+self.midchannels[3], 128)
self.auxi_block2 = AuxiBlock(128 + self.midchannels[2], 128)
self.auxi_block3 = AuxiBlock(128 + self.midchannels[2], 128)
self.auxi_block4 = AuxiBlock(128 + self.midchannels[1], 128)
self.auxi_block5 = AuxiBlock(128 + self.midchannels[0], 128)
self.out_conv = AO(128, 1, 2)
self._init_weights()
def _init_weights(self):
def init_func(m):
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
def init_model_weight(m):
for child_m in m.children():
if not isinstance(child_m, nn.ModuleList):
child_m.apply(init_func)
init_model_weight(self)
def forward(self, auxi_in):
out = self.auxi_block1(auxi_in[0], auxi_in[1]) # 1/32
out = self.auxi_block2(out, auxi_in[2]) # 1/16
out = self.auxi_block3(out, auxi_in[3]) # 1/8
out = self.auxi_block4(out, auxi_in[4]) # 1/4
out = self.auxi_block5(out, auxi_in[5]) # 1/2
out = self.out_conv(out)
return out
class AuxiNet(nn.Module):
def __init__(self):
super().__init__()
self.inchannels = cfg.MODEL.RESNET_BOTTLENECK_DIM[1:] # [256, 512, 1024, 2048]
self.midchannels = cfg.MODEL.LATERAL_OUT[::-1] # [256, 256, 256, 512]
self.auxi_block1 = AuxiBlock(self.midchannels[2]+self.midchannels[3], 256)
self.auxi_block2 = AuxiBlock(256 + self.midchannels[2], 256)
self.auxi_block3 = AuxiBlock(256 + self.midchannels[2], 256)
self.auxi_block4 = AuxiBlock(256 + self.midchannels[1], 256)
self.auxi_block5 = AuxiBlock(256 + self.midchannels[0], 256)
self.out_conv = AO(256, 1, 2)
self._init_weights()
def _init_weights(self):
def init_func(m):
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
def init_model_weight(m):
for child_m in m.children():
if not isinstance(child_m, nn.ModuleList):
child_m.apply(init_func)
init_model_weight(self)
def forward(self, auxi_in):
out = self.auxi_block1(auxi_in[0], auxi_in[1]) # 1/32
out = self.auxi_block2(out, auxi_in[2]) # 1/16
out = self.auxi_block3(out, auxi_in[3]) # 1/8
out = self.auxi_block4(out, auxi_in[4]) # 1/4
out = self.auxi_block5(out, auxi_in[5]) # 1/2
out = self.out_conv(out)
return out
class Decoder(nn.Module):
def __init__(self):
super(Decoder, self).__init__()
self.inchannels = cfg.MODEL.RESNET_BOTTLENECK_DIM[1:] # [256, 512, 1024, 2048]
self.midchannels = cfg.MODEL.LATERAL_OUT[::-1] # [256, 256, 256, 512]
self.upfactors = [2,2,2,2]
self.outchannels = cfg.MODEL.DECODER_OUTPUT_C # 1
self.conv = FTB(inchannels=self.inchannels[3], midchannels=self.midchannels[3])
self.conv1 = nn.Conv2d(in_channels=self.midchannels[3], out_channels=self.midchannels[2], kernel_size=3, padding=1, stride=1, bias=True)
self.upsample = nn.Upsample(scale_factor=self.upfactors[3], mode='bilinear', align_corners=True)
self.ffm2 = FFM(inchannels=self.inchannels[2], midchannels=self.midchannels[2], outchannels = self.midchannels[2], upfactor=self.upfactors[2])
self.ffm1 = FFM(inchannels=self.inchannels[1], midchannels=self.midchannels[1], outchannels = self.midchannels[1], upfactor=self.upfactors[1])
self.ffm0 = FFM(inchannels=self.inchannels[0], midchannels=self.midchannels[0], outchannels = self.midchannels[0], upfactor=self.upfactors[0])
#self.outconv = nn.Conv2d(in_channels=self.inchannels[0], out_channels=self.outchannels, kernel_size=3, padding=1, stride=1, bias=True)
self.outconv = AO(inchannels=self.midchannels[0], outchannels=self.outchannels, upfactor=2)
self._init_params()
def _init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): #NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
def forward(self, features):
# features' shape: # 1/32, 1/16, 1/8, 1/4
# _,_,h,w = features[3].size()
x_32x = self.conv(features[3]) # 1/32
x_32 = self.conv1(x_32x)
x_16 = self.upsample(x_32) # 1/16
x_8 = self.ffm2(features[2], x_16) # 1/8
#print('ffm2:', x.size())
x_4 = self.ffm1(features[1], x_8) # 1/4
#print('ffm1:', x.size())
x_2 = self.ffm0(features[0], x_4) # 1/2
#print('ffm0:', x.size())
#-----------------------------------------
x = self.outconv(x_2) # original size
auxi_input = [x_32x, x_32, x_16, x_8, x_4, x_2]
return x, auxi_input
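# Note: the decoder runs top-down: the 1/32 backbone feature passes through FTB,
# each FFM fuses a lateral skip with the upsampled top path (1/16 -> 1/8 -> 1/4
# -> 1/2), and AO upsamples once more to full resolution. auxi_input taps every
# intermediate scale so AuxiNet(V2) can regress the auxiliary disparity map
# from the same pyramid.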
class DepthNet(nn.Module):
__factory = {
18: Resnet.resnet18,
34: Resnet.resnet34,
50: Resnet.resnet50,
101: Resnet.resnet101,
152: Resnet.resnet152
}
def __init__(self,
backbone='resnet',
depth=50,
upfactors=[2, 2, 2, 2]):
super(DepthNet, self).__init__()
self.backbone = backbone
self.depth = depth
self.pretrained = cfg.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS # True
self.inchannels = cfg.MODEL.RESNET_BOTTLENECK_DIM[1:] # [256, 512, 1024, 2048]
self.midchannels = cfg.MODEL.LATERAL_OUT[::-1] # [256, 256, 256, 512]
self.upfactors = upfactors
self.outchannels = cfg.MODEL.DECODER_OUTPUT_C # 1
# Build model
if self.backbone == 'resnet':
if self.depth not in DepthNet.__factory:
raise KeyError("Unsupported depth:", self.depth)
self.encoder = DepthNet.__factory[depth](pretrained=self.pretrained)
elif self.backbone == 'resnext101_32x8d':
self.encoder = Resnext_torch.resnext101_32x8d(pretrained=self.pretrained)
elif self.backbone == 'mobilenetv2':
# NOTE: MobileNet_torch is not imported in this file; this branch would need
# something like `from lib.models import MobileNet_torch` (module name assumed)
self.encoder = MobileNet_torch.mobilenet_v2(pretrained=self.pretrained)
else:
self.encoder = Resnext_torch.resnext101(pretrained=self.pretrained)
def forward(self, x):
x = self.encoder(x) # 1/32, 1/16, 1/8, 1/4
return x
class FTB(nn.Module):
def __init__(self, inchannels, midchannels=512):
super(FTB, self).__init__()
self.in1 = inchannels
self.mid = midchannels
self.conv1 = nn.Conv2d(in_channels=self.in1, out_channels=self.mid, kernel_size=3, padding=1, stride=1,
bias=True)
# NN.BatchNorm2d
# self.sample_conv = nn.Sequential(nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3, padding=1, stride=1, bias=True),
# nn.ReLU(inplace=True),
# nn.BatchNorm2d(num_features=self.mid),
# nn.Conv2d(in_channels=self.mid, out_channels= self.mid, kernel_size=3, padding=1, stride=1, bias=True))
self.conv_branch = nn.Sequential(nn.ReLU(inplace=True), \
nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
padding=1, stride=1, bias=True), \
nn.BatchNorm2d(num_features=self.mid), \
nn.ReLU(inplace=True), \
nn.Conv2d(in_channels=self.mid, out_channels=self.mid, kernel_size=3,
padding=1, stride=1, bias=True))
self.relu = nn.ReLU(inplace=True)
self.init_params()
def forward(self, x):
x = self.conv1(x)
x = x + self.conv_branch(x)
x = self.relu(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class ATA(nn.Module):
def __init__(self, inchannels, reduction=8):
super(ATA, self).__init__()
self.inchannels = inchannels
self.avg_pool = nn.AdaptiveAvgPool2d(1)
self.fc = nn.Sequential(nn.Linear(self.inchannels * 2, self.inchannels // reduction),
nn.ReLU(inplace=True),
nn.Linear(self.inchannels // reduction, self.inchannels),
nn.Sigmoid())
self.init_params()
def forward(self, low_x, high_x):
n, c, _, _ = low_x.size()
x = torch.cat([low_x, high_x], 1)
x = self.avg_pool(x)
x = x.view(n, -1)
x = self.fc(x).view(n, c, 1, 1)
x = low_x * x + high_x
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
# init.normal(m.weight, std=0.01)
init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
# init.normal_(m.weight, std=0.01)
init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class FFM(nn.Module):
def __init__(self, inchannels, midchannels, outchannels, upfactor=2):
super(FFM, self).__init__()
self.inchannels = inchannels
self.midchannels = midchannels
self.outchannels = outchannels
self.upfactor = upfactor
self.ftb1 = FTB(inchannels=self.inchannels, midchannels=self.midchannels)
# self.ata = ATA(inchannels = self.midchannels)
self.ftb2 = FTB(inchannels=self.midchannels, midchannels=self.outchannels)
self.upsample = nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True)
self.init_params()
def forward(self, low_x, high_x):
x = self.ftb1(low_x)
x = x + high_x
x = self.ftb2(x)
x = self.upsample(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
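# Illustrative usage sketch (added note, assumed shapes): FFM fuses a lateral
# (low-level) feature with the decoder (high-level) feature and upsamples by
# `upfactor`, so the default setting doubles the spatial resolution, e.g.:
#   ffm = FFM(inchannels=256, midchannels=256, outchannels=256, upfactor=2)
#   y = ffm(torch.randn(2, 256, 28, 28), torch.randn(2, 256, 28, 28))
#   # y.shape == (2, 256, 56, 56)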
class AO(nn.Module):
# Adaptive output module
def __init__(self, inchannels, outchannels, upfactor=2):
super(AO, self).__init__()
self.inchannels = inchannels
self.outchannels = outchannels
self.upfactor = upfactor
        self.adapt_conv = nn.Sequential(
            nn.Conv2d(in_channels=self.inchannels, out_channels=self.inchannels // 2, kernel_size=3, padding=1,
                      stride=1, bias=True),
            nn.BatchNorm2d(num_features=self.inchannels // 2),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels=self.inchannels // 2, out_channels=self.outchannels, kernel_size=3, padding=1,
                      stride=1, bias=True),
            nn.Upsample(scale_factor=self.upfactor, mode='bilinear', align_corners=True))
self.init_params()
def forward(self, x):
x = self.adapt_conv(x)
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.Batchnorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class ASPP(nn.Module):
def __init__(self, inchannels=256, planes=128, rates=[1, 6, 12, 18]):
super(ASPP, self).__init__()
self.inchannels = inchannels
self.planes = planes
self.rates = rates
self.kernel_sizes = []
self.paddings = []
for rate in self.rates:
if rate == 1:
self.kernel_sizes.append(1)
self.paddings.append(0)
else:
self.kernel_sizes.append(3)
self.paddings.append(rate)
self.atrous_0 = nn.Sequential(
nn.Conv2d(in_channels=self.inchannels, out_channels=self.planes, kernel_size=self.kernel_sizes[0],
stride=1, padding=self.paddings[0], dilation=self.rates[0], bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(num_features=self.planes)
)
self.atrous_1 = nn.Sequential(
nn.Conv2d(in_channels=self.inchannels, out_channels=self.planes, kernel_size=self.kernel_sizes[1],
stride=1, padding=self.paddings[1], dilation=self.rates[1], bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(num_features=self.planes),
)
self.atrous_2 = nn.Sequential(
nn.Conv2d(in_channels=self.inchannels, out_channels=self.planes, kernel_size=self.kernel_sizes[2],
stride=1, padding=self.paddings[2], dilation=self.rates[2], bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(num_features=self.planes),
)
self.atrous_3 = nn.Sequential(
nn.Conv2d(in_channels=self.inchannels, out_channels=self.planes, kernel_size=self.kernel_sizes[3],
stride=1, padding=self.paddings[3], dilation=self.rates[3], bias=True),
nn.ReLU(inplace=True),
nn.BatchNorm2d(num_features=self.planes),
)
# self.conv = nn.Conv2d(in_channels=self.planes * 4, out_channels=self.inchannels, kernel_size=3, padding=1, stride=1, bias=True)
def forward(self, x):
x = torch.cat([self.atrous_0(x), self.atrous_1(x), self.atrous_2(x), self.atrous_3(x)], 1)
# x = self.conv(x)
return x
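# Illustrative usage sketch (added note, assumed shapes): the four atrous branches
# run in parallel at the input resolution (padding matches dilation for the 3x3
# kernels) and are concatenated along channels, giving planes * 4 outputs, e.g.:
#   aspp = ASPP(inchannels=256, planes=128, rates=[1, 6, 12, 18])
#   y = aspp(torch.randn(2, 256, 32, 32))  # y.shape == (2, 512, 32, 32)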
# ==============================================================================================================
class ResidualConv(nn.Module):
def __init__(self, inchannels):
super(ResidualConv, self).__init__()
# NN.BatchNorm2d
self.conv = nn.Sequential(
# nn.BatchNorm2d(num_features=inchannels),
nn.ReLU(inplace=False),
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=3, padding=1, stride=1, groups=inchannels,bias=True),
# nn.Conv2d(in_channels=inchannels, out_channels=inchannels, kernel_size=1, padding=0, stride=1, groups=1,bias=True)
            # integer division keeps the channel count an int (nn.Conv2d rejects floats)
            nn.Conv2d(in_channels=inchannels, out_channels=inchannels // 2, kernel_size=3, padding=1, stride=1,
                      bias=False),
            nn.BatchNorm2d(num_features=inchannels // 2),
            nn.ReLU(inplace=False),
            nn.Conv2d(in_channels=inchannels // 2, out_channels=inchannels, kernel_size=3, padding=1, stride=1,
                      bias=False)
)
self.init_params()
def forward(self, x):
x = self.conv(x) + x
return x
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class FeatureFusion(nn.Module):
def __init__(self, inchannels, outchannels):
super(FeatureFusion, self).__init__()
self.conv = ResidualConv(inchannels=inchannels)
# NN.BatchNorm2d
self.up = nn.Sequential(ResidualConv(inchannels=inchannels),
nn.ConvTranspose2d(in_channels=inchannels, out_channels=outchannels, kernel_size=3,
stride=2, padding=1, output_padding=1),
nn.BatchNorm2d(num_features=outchannels),
nn.ReLU(inplace=True))
def forward(self, lowfeat, highfeat):
return self.up(highfeat + self.conv(lowfeat))
def init_params(self):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.ConvTranspose2d):
# init.kaiming_normal_(m.weight, mode='fan_out')
init.normal_(m.weight, std=0.01)
# init.xavier_normal_(m.weight)
if m.bias is not None:
init.constant_(m.bias, 0)
elif isinstance(m, nn.BatchNorm2d): # NN.BatchNorm2d
init.constant_(m.weight, 1)
init.constant_(m.bias, 0)
elif isinstance(m, nn.Linear):
init.normal_(m.weight, std=0.01)
if m.bias is not None:
init.constant_(m.bias, 0)
class SenceUnderstand(nn.Module):
def __init__(self, channels):
super(SenceUnderstand, self).__init__()
self.channels = channels
self.conv1 = nn.Sequential(nn.Conv2d(in_channels=512, out_channels=512, kernel_size=3, padding=1),
nn.ReLU(inplace=True))
self.pool = nn.AdaptiveAvgPool2d(8)
self.fc = nn.Sequential(nn.Linear(512 * 8 * 8, self.channels),
nn.ReLU(inplace=True))
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.channels, out_channels=self.channels, kernel_size=1, padding=0),
nn.ReLU(inplace=True))
self.initial_params()
def forward(self, x):
n, c, h, w = x.size()
x = self.conv1(x)
x = self.pool(x)
x = x.view(n, -1)
x = self.fc(x)
x = x.view(n, self.channels, 1, 1)
x = self.conv2(x)
x = x.repeat(1, 1, h, w)
return x
def initial_params(self, dev=0.01):
for m in self.modules():
if isinstance(m, nn.Conv2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0, dev)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, nn.ConvTranspose2d):
# print torch.sum(m.weight)
m.weight.data.normal_(0, dev)
if m.bias is not None:
m.bias.data.fill_(0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0, dev)
if __name__ == '__main__':
net = DepthNet(depth=50, pretrained=True)
print(net)
    inputs = torch.ones(4, 3, 128, 128)
out = net(inputs)
print(out.size())
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import os
import six
import yaml
import copy
import numpy as np
from ast import literal_eval
from lib.utils.collections import AttrDict
from lib.utils.misc import get_run_name
__C = AttrDict()
# Consumers can get config by:
cfg = __C
# Root directory of project
__C.ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
# ---------------------------------------------------------------------------- #
# Data configurations
# ---------------------------------------------------------------------------- #
__C.DATASET = AttrDict()
__C.DATASET.NAME = 'diversedepth'
__C.DATASET.RGB_PIXEL_MEANS = (0.485, 0.456, 0.406) # (102.9801, 115.9465, 122.7717)
__C.DATASET.RGB_PIXEL_VARS = (0.229, 0.224, 0.225) # (1, 1, 1)
# Scale the depth map
#__C.DATASET.DEPTH_SCALE = 10.0
__C.DATASET.CROP_SIZE = (448, 448) # (height, width)
# Camera Parameters
__C.DATASET.FOCAL_X = 256.0
__C.DATASET.FOCAL_Y = 256.0
# ---------------------------------------------------------------------------- #
# Models configurations
# ---------------------------------------------------------------------------- #
__C.MODEL = AttrDict()
__C.MODEL.INIT_TYPE = 'xavier'
# Configure the model type for the encoder, e.g. ResNeXt50_32x4d_body_stride16
__C.MODEL.ENCODER = 'resnet50_stride32'
__C.MODEL.MODEL_REPOSITORY = 'datasets/pretrained_model'
__C.MODEL.PRETRAINED_WEIGHTS = 'resnext50_32x4d.pth'
__C.MODEL.LOAD_IMAGENET_PRETRAINED_WEIGHTS = True
# Configure resnext
__C.MODEL.RESNET_BOTTLENECK_DIM = [64, 256, 512, 1024, 2048]
__C.MODEL.RESNET_BLOCK_DIM = [64, 64, 128, 256, 512]
# Configure the decoder
__C.MODEL.FCN_DIM_IN = [512, 256, 256, 256, 256, 256]
__C.MODEL.FCN_DIM_OUT = [256, 256, 256, 256, 256]
__C.MODEL.LATERAL_OUT = [512, 256, 256, 256]
# Configure input and output channel of the model
__C.MODEL.ENCODER_INPUT_C = 3
__C.MODEL.DECODER_OUTPUT_C = 1
# Configure weight for different losses
__C.MODEL.FREEZE_BACKBONE_BN = False
#__C.MODEL.USE_SYNCBN = True
__C.MODEL.DEVICE = "cuda"
# ---------------------------------------------------------------------------- #
# Training configurations
# ---------------------------------------------------------------------------- #
__C.TRAIN = AttrDict()
# Load run name, which is the combination of running time and host name
__C.TRAIN.RUN_NAME = get_run_name()
__C.TRAIN.OUTPUT_DIR = './outputs'
# Dir for checkpoint and logs
__C.TRAIN.LOG_DIR = os.path.join(__C.TRAIN.OUTPUT_DIR, cfg.TRAIN.RUN_NAME)
# Scale the decoder learning rate relative to the encoder
__C.TRAIN.SCALE_DECODER_LR = 1
__C.TRAIN.BASE_LR = 0.001
__C.TRAIN.MAX_ITER = 0
# Set training epochs; training ends at the last epoch in the list
__C.TRAIN.EPOCH = 50
__C.TRAIN.LR_SCHEDULER_MULTISTEPS = [30000, 120000, 200000]
__C.TRAIN.LR_SCHEDULER_GAMMA = 0.1
__C.TRAIN.WARMUP_FACTOR = 1.0 / 3
__C.TRAIN.WARMUP_ITERS = 500 # Need to change
__C.TRAIN.WARMUP_METHOD = "linear"
# Snapshot (model checkpoint) period
__C.TRAIN.SNAPSHOT_ITERS = 5000
__C.TRAIN.VAL_STEP = 5000
__C.TRAIN.BATCHSIZE = 4
__C.TRAIN.GPU_NUM = 1
# Steps for LOG interval
__C.TRAIN.LOG_INTERVAL = 10
__C.TRAIN.LOAD_CKPT = None
# Configs for loss
__C.TRAIN.LOSS_MODE = '_vnl_ssil_ranking_'
__C.TRAIN.LOSS_AUXI_WEIGHT = 0.5
#__C.TRAIN.DIFF_LOSS_WEIGHT = 5
__C.TRAIN.OPTIM = 'SGD'
def print_configs(cfg):
import logging
logger = logging.getLogger(__name__)
message = ''
message += '----------------- Configs ---------------\n'
for k, kv in cfg.items():
if isinstance(kv, AttrDict):
name1 = '.'.join(['cfg', k])
message += '{:>50}: \n'.format(name1)
for k_, v_ in kv.items():
name2 = '.'.join([name1, k_])
message += '{:>50}: {:<30}\n'.format(str(name2), str(v_))
else:
message += '{:>50}: {:<30}\n'.format(k, kv)
message += '----------------- End -------------------'
logger.info(message)
def merge_cfg_from_file(train_args):
"""Load a yaml config file and merge it into the global config."""
# cfg_filename = train_args.cfg_file
# cfg_file = os.path.join(__C.ROOT_DIR, cfg_filename + '.yaml')
# with open(cfg_file, 'r') as f:
# yaml_cfg = AttrDict(yaml.safe_load(f))
# _merge_a_into_b(yaml_cfg, __C)
# __C.DATASET.DEPTH_MIN_LOG = np.log10(__C.DATASET.DEPTH_MIN)
# # Modify some configs
# __C.DATASET.DEPTH_BIN_INTERVAL = (np.log10(__C.DATASET.DEPTH_MAX) - np.log10(
# __C.DATASET.DEPTH_MIN)) / __C.MODEL.DECODER_OUTPUT_C
# # The boundary of each bin
# __C.DATASET.DEPTH_BIN_BORDER = np.array([__C.DATASET.DEPTH_MIN_LOG + __C.DATASET.DEPTH_BIN_INTERVAL * (i + 0.5)
# for i in range(__C.MODEL.DECODER_OUTPUT_C)])
# __C.DATASET.WCE_LOSS_WEIGHT = [[np.exp(-0.2 * (i - j) ** 2) for i in range(__C.MODEL.DECODER_OUTPUT_C)]
# for j in np.arange(__C.MODEL.DECODER_OUTPUT_C)]
if train_args.backbone == 'resnet50':
__C.MODEL.ENCODER = 'resnet50_stride32'
__C.MODEL.PRETRAINED_WEIGHTS = 'resnext50_32x4d.pth'
elif train_args.backbone == 'resnext101':
__C.MODEL.ENCODER = 'resnext101_stride32x8d'
__C.MODEL.PRETRAINED_WEIGHTS = 'resnext101_stride32x8d.pth'
else:
        raise ValueError('Unsupported backbone: {}'.format(train_args.backbone))
for k, v in vars(train_args).items():
if k.upper() in __C.TRAIN.keys():
__C.TRAIN[k.upper()] = getattr(train_args, k)
__C.TRAIN.LOG_DIR = os.path.join(__C.TRAIN.OUTPUT_DIR, cfg.TRAIN.RUN_NAME)
def _merge_a_into_b(a, b, stack=None):
"""Merge config dictionary a into config dictionary b, clobbering the
options in b whenever they are also specified in a.
"""
assert isinstance(a, AttrDict), 'Argument `a` must be an AttrDict'
assert isinstance(b, AttrDict), 'Argument `b` must be an AttrDict'
for k, v_ in a.items():
full_key = '.'.join(stack) + '.' + k if stack is not None else k
# a must specify keys that are in b
if k not in b:
# if _key_is_deprecated(full_key):
# continue
# elif _key_is_renamed(full_key):
# _raise_key_rename_error(full_key)
# else:
raise KeyError('Non-existent config key: {}'.format(full_key))
v = copy.deepcopy(v_)
v = _decode_cfg_value(v)
v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
# Recursively merge dicts
if isinstance(v, AttrDict):
try:
stack_push = [k] if stack is None else stack + [k]
_merge_a_into_b(v, b[k], stack=stack_push)
except BaseException:
raise
else:
b[k] = v
def _decode_cfg_value(v):
"""Decodes a raw config value (e.g., from a yaml config files or command
line argument) into a Python object.
"""
# Configs parsed from raw yaml will contain dictionary keys that need to be
# converted to AttrDict objects
if isinstance(v, dict):
return AttrDict(v)
# All remaining processing is only applied to strings
if not isinstance(v, six.string_types):
return v
# Try to interpret `v` as a:
# string, number, tuple, list, dict, boolean, or None
try:
v = literal_eval(v)
# The following two excepts allow v to pass through when it represents a
# string.
#
# Longer explanation:
# The type of v is always a string (before calling literal_eval), but
# sometimes it *represents* a string and other times a data structure, like
# a list. In the case that v represents a string, what we got back from the
# yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
# ok with '"foo"', but will raise a ValueError if given 'foo'. In other
# cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
# will raise a SyntaxError.
except ValueError:
pass
except SyntaxError:
pass
return v
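# Illustrative examples (added note) of how _decode_cfg_value behaves:
#   _decode_cfg_value('0.1')        -> 0.1           (parsed by literal_eval)
#   _decode_cfg_value('[1, 2, 3]')  -> [1, 2, 3]
#   _decode_cfg_value('foo/bar')    -> 'foo/bar'     (SyntaxError -> kept as string)
#   _decode_cfg_value({'A': 1})     -> AttrDict({'A': 1})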
def _check_and_coerce_cfg_value_type(value_a, value_b, key, full_key):
"""Checks that `value_a`, which is intended to replace `value_b` is of the
right type. The type is correct if it matches exactly or is one of a few
cases in which the type can be easily coerced.
"""
# The types must match (with some exceptions)
type_b = type(value_b)
type_a = type(value_a)
if type_a is type_b:
return value_a
# Exceptions: numpy arrays, strings, tuple<->list
if isinstance(value_b, np.ndarray):
value_a = np.array(value_a, dtype=value_b.dtype)
elif isinstance(value_b, six.string_types):
value_a = str(value_a)
elif isinstance(value_a, tuple) and isinstance(value_b, list):
value_a = list(value_a)
elif isinstance(value_a, list) and isinstance(value_b, tuple):
value_a = tuple(value_a)
else:
raise ValueError(
'Type mismatch ({} vs. {}) with values ({} vs. {}) for config '
'key: {}'.format(type_b, type_a, value_b, value_a, full_key)
)
return value_a
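# Illustrative examples (added note, hypothetical keys) of the allowed coercions:
#   _check_and_coerce_cfg_value_type((448, 448), [448, 448], 'CROP_SIZE', 'DATASET.CROP_SIZE')
#       -> [448, 448]   (tuple coerced to list)
#   _check_and_coerce_cfg_value_type([1, 6], (1, 6), 'RATES', 'MODEL.RATES')
#       -> (1, 6)       (list coerced to tuple)
# Mismatched types that are not coercible (e.g. str vs. int) raise ValueError.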
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import importlib
import os
import dill
import logging
import cv2
import numpy as np
import torch
import torch.nn as nn
import matplotlib.pyplot as plt
from collections import OrderedDict
from lib.configs.config import cfg
logger = logging.getLogger(__name__)
def get_func(func_name):
"""Helper to return a function object by name. func_name must identify a
function in this module or the path to a function relative to the base
'modeling' module.
"""
if func_name == '':
return None
try:
parts = func_name.split('.')
# Refers to a function in this module
if len(parts) == 1:
return globals()[parts[0]]
# Otherwise, assume we're referencing a module under modeling
module_name = 'lib.models.' + '.'.join(parts[:-1])
module = importlib.import_module(module_name)
return getattr(module, parts[-1])
except Exception:
        logger.error('Failed to find function: %s', func_name)
raise
def load_ckpt(args, model, optimizer=None, scheduler=None, val_err=[]):
"""
Load checkpoint.
"""
if os.path.isfile(args.load_ckpt):
logger.info("loading checkpoint %s", args.load_ckpt)
checkpoint = torch.load(args.load_ckpt, map_location=lambda storage, loc: storage, pickle_module=dill)
model_state_dict_keys = model.state_dict().keys()
checkpoint_state_dict_noprefix = strip_prefix_if_present(checkpoint['model_state_dict'], "module.")
if all(key.startswith('module.') for key in model_state_dict_keys):
model.module.load_state_dict(checkpoint_state_dict_noprefix)
else:
model.load_state_dict(checkpoint_state_dict_noprefix)
if args.resume:
#args.batchsize = checkpoint['batch_size']
args.start_step = checkpoint['step']
args.start_epoch = checkpoint['epoch']
optimizer.load_state_dict(checkpoint['optimizer'])
scheduler.load_state_dict(checkpoint['scheduler'])
scheduler.__setattr__('last_epoch', checkpoint['step'])
if 'val_err' in checkpoint: # For backward compatibility
val_err[0] = checkpoint['val_err']
del checkpoint
torch.cuda.empty_cache()
def strip_prefix_if_present(state_dict, prefix):
keys = sorted(state_dict.keys())
if not all(key.startswith(prefix) for key in keys):
return state_dict
stripped_state_dict = OrderedDict()
for key, value in state_dict.items():
stripped_state_dict[key.replace(prefix, "")] = value
return stripped_state_dict
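# Illustrative example (added note, `w` is a placeholder tensor): strips a
# DataParallel-style prefix only when every key carries it:
#   strip_prefix_if_present({'module.conv.weight': w}, 'module.')
#       -> {'conv.weight': w}
#   strip_prefix_if_present({'conv.weight': w}, 'module.')
#       -> {'conv.weight': w}   (returned unchanged: not all keys have the prefix)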
def save_ckpt(args, step, epoch, model, optimizer, scheduler, val_err={}):
"""Save checkpoint"""
ckpt_dir = os.path.join(cfg.TRAIN.LOG_DIR, 'ckpt')
if not os.path.exists(ckpt_dir):
os.makedirs(ckpt_dir)
save_name = os.path.join(ckpt_dir, 'epoch%d_step%d.pth' %(epoch, step))
if isinstance(model, nn.DataParallel):
model = model.module
torch.save({
'step': step,
'epoch': epoch,
'batch_size': args.batchsize,
'scheduler': scheduler.state_dict(),
'val_err': val_err,
'model_state_dict': model.state_dict(),
'optimizer': optimizer.state_dict()},
save_name, pickle_module=dill)
logger.info('save model: %s', save_name)
# save image to the disk
def save_images(data, pred, scale=60000.):
rgb = data['A_raw']
gt = data['B_raw']
if type(rgb).__module__ != np.__name__:
rgb = rgb.cpu().numpy()
rgb = np.squeeze(rgb)
rgb = rgb[:, :, ::-1]
if type(gt).__module__ != np.__name__:
gt = gt.cpu().numpy()
gt = np.squeeze(gt)
if type(pred).__module__ != np.__name__:
pred = pred.cpu().numpy()
pred = np.squeeze(pred)
model_name = (cfg.DATA.LOAD_MODEL_NAME.split('/')[-1]).split('.')[0]
image_dir = os.path.join(cfg.TRAIN.OUTPUT_ROOT_DIR, '../evaluation', model_name)
if not os.path.exists(image_dir):
os.makedirs(image_dir)
if 'kitti' in cfg.DATASET:
name = data['A_paths'][0].split('/')[-4] + '-' + data['A_paths'][0].split('/')[-1].split('.')[0]
else:
name = data['A_paths'][0].split('/')[-1].split('.')[0]
rgb_name = '%s_%s.png' % (name, 'rgb')
gt_name = '%s_%s.png' % (name, 'gt')
gt_raw_name = '%s_%s.png' % (name, 'gt-raw')
pred_name = '%s_%s.png' % (name, 'pred')
pred_raw_name = '%s_%s.png' % (name, 'pred-raw')
plt.imsave(os.path.join(image_dir, rgb_name), rgb)
if len(data['B_raw'].shape) != 2:
plt.imsave(os.path.join(image_dir, gt_name), gt, cmap='rainbow')
gt_scale = gt * scale
gt_scale = gt_scale.astype('uint16')
cv2.imwrite(os.path.join(image_dir, gt_raw_name), gt_scale)
plt.imsave(os.path.join(image_dir, pred_name), pred, cmap='rainbow')
pred_raw = pred * scale
pred_raw = pred_raw.astype('uint16')
cv2.imwrite(os.path.join(image_dir, pred_raw_name), pred_raw) | {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
"""Utilities for training."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import defaultdict, OrderedDict
import datetime
from lib.configs.config import cfg
from lib.utils.logging import log_stats
from lib.utils.logging import SmoothedValue
from lib.utils.timer import Timer
class TrainingStats(object):
"""Track vital training statistics."""
def __init__(self, args, log_period=20, tensorboard_logger=None):
# Output logging period in SGD iterations
self.args = args
self.log_period = log_period
self.tblogger = tensorboard_logger
self.tb_ignored_keys = ['iter', 'eta', 'epoch', 'time']
self.iter_timer = Timer()
# Window size for smoothing tracked values (with median filtering)
self.filter_size = log_period
def create_smoothed_value():
return SmoothedValue(self.filter_size)
self.smoothed_losses = defaultdict(create_smoothed_value)
self.smoothed_metrics = defaultdict(create_smoothed_value)
self.smoothed_total_loss = SmoothedValue(self.filter_size)
def IterTic(self):
self.iter_timer.tic()
def IterToc(self):
return self.iter_timer.toc(average=False)
def ResetIterTimer(self):
self.iter_timer.reset()
def UpdateIterStats(self, loss):
"""Update tracked iteration statistics."""
total_loss = 0
for k in loss:
# all losses except the total loss: loss['all']
if k != 'total_loss':
self.smoothed_losses[k].AddValue(float(loss[k]))
total_loss += loss['total_loss']
self.smoothed_total_loss.AddValue(float(total_loss))
def LogIterStats(self, cur_iter, cur_epoch, optimizer, val_err={}):
"""Log the tracked statistics."""
if (cur_iter % self.log_period == 0):
stats = self.GetStats(cur_iter, cur_epoch, optimizer, val_err)
log_stats(stats, self.args)
if self.tblogger:
self.tb_log_stats(stats, cur_iter)
def tb_log_stats(self, stats, cur_iter):
"""Log the tracked statistics to tensorboard"""
for k in stats:
if k not in self.tb_ignored_keys:
v = stats[k]
if isinstance(v, dict):
self.tb_log_stats(v, cur_iter)
else:
self.tblogger.add_scalar(k, v, cur_iter)
def GetStats(self, cur_iter, cur_epoch, optimizer, val_err = {}):
eta_seconds = self.iter_timer.average_time * (
cfg.TRAIN.MAX_ITER - cur_iter
)
eta = str(datetime.timedelta(seconds=int(eta_seconds)))
stats = OrderedDict(
iter=cur_iter, # 1-indexed
time=self.iter_timer.average_time,
eta=eta,
total_loss=self.smoothed_total_loss.GetMedianValue(),
epoch=cur_epoch,
)
optimizer_state_dict = optimizer.state_dict()
lr = {}
for i in range(len(optimizer_state_dict['param_groups'])):
lr_name = 'group%d_lr' % i
lr[lr_name] = optimizer_state_dict['param_groups'][i]['lr']
stats['lr'] = OrderedDict(lr)
for k, v in self.smoothed_losses.items():
stats[k] = OrderedDict([(k, v.GetMedianValue())])
stats['val_err'] = OrderedDict(val_err)
return stats
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import os
import socket
from datetime import datetime
def get_run_name():
""" A unique name for each run """
return datetime.now().strftime(
'%b%d-%H-%M-%S') + '_' + socket.gethostname()
IMG_EXTENSIONS = ['.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm']
def is_image_file(filename):
"""Checks if a file is an image.
Args:
filename (string): path to a file
Returns:
bool: True if the filename ends with a known image extension
"""
filename_lower = filename.lower()
return any(filename_lower.endswith(ext) for ext in IMG_EXTENSIONS)
def get_imagelist_from_dir(dirpath):
images = []
for f in os.listdir(dirpath):
if is_image_file(f):
images.append(os.path.join(dirpath, f))
return images
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
"""
This file contains primitives for multi-gpu communication.
This is useful when doing distributed training.
"""
import pickle
import torch
import torch.distributed as dist
def get_world_size():
if not dist.is_available():
return 1
if not dist.is_initialized():
return 1
return dist.get_world_size()
def get_rank():
if not dist.is_available():
return 0
if not dist.is_initialized():
return 0
return dist.get_rank()
def is_main_process():
return get_rank() == 0
def synchronize():
"""
Helper function to synchronize (barrier) among all processes when
using distributed training
"""
if not dist.is_available():
return
if not dist.is_initialized():
return
world_size = dist.get_world_size()
if world_size == 1:
return
dist.barrier()
def all_gather(data):
"""
Run all_gather on arbitrary picklable data (not necessarily tensors)
Args:
data: any picklable object
Returns:
list[data]: list of data gathered from each rank
"""
world_size = get_world_size()
if world_size == 1:
return [data]
# serialized to a Tensor
buffer = pickle.dumps(data)
storage = torch.ByteStorage.from_buffer(buffer)
tensor = torch.ByteTensor(storage).to("cuda")
# obtain Tensor size of each rank
local_size = torch.IntTensor([tensor.numel()]).to("cuda")
size_list = [torch.IntTensor([0]).to("cuda") for _ in range(world_size)]
dist.all_gather(size_list, local_size)
size_list = [int(size.item()) for size in size_list]
max_size = max(size_list)
# receiving Tensor from all ranks
# we pad the tensor because torch all_gather does not support
# gathering tensors of different shapes
tensor_list = []
for _ in size_list:
tensor_list.append(torch.ByteTensor(size=(max_size,)).to("cuda"))
if local_size != max_size:
padding = torch.ByteTensor(size=(max_size - local_size,)).to("cuda")
tensor = torch.cat((tensor, padding), dim=0)
dist.all_gather(tensor_list, tensor)
data_list = []
for size, tensor in zip(size_list, tensor_list):
buffer = tensor.cpu().numpy().tobytes()[:size]
data_list.append(pickle.loads(buffer))
return data_list
def reduce_dict(input_dict, average=True):
"""
Args:
input_dict (dict): all the values will be reduced
average (bool): whether to do average or sum
Reduce the values in the dictionary from all processes so that process with rank
0 has the averaged results. Returns a dict with the same fields as
input_dict, after reduction.
"""
world_size = get_world_size()
if world_size < 2:
return input_dict
with torch.no_grad():
names = []
values = []
# sort the keys so that they are consistent across processes
for k in sorted(input_dict.keys()):
names.append(k)
values.append(input_dict[k])
values = torch.stack(values, dim=0)
dist.reduce(values, dst=0)
if dist.get_rank() == 0 and average:
# only main process gets accumulated, so only divide by
# world_size in this case
values /= world_size
reduced_dict = {k: v for k, v in zip(names, values)}
return reduced_dict
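# Illustrative usage sketch (added note, assumes an initialized process group and
# CUDA tensors):
#   loss_dict = {'loss_depth': torch.tensor(0.5, device='cuda')}
#   reduced = reduce_dict(loss_dict)  # on rank 0: values averaged over all ranks
# With world_size == 1 the input dict is returned untouched, so the call is safe
# in single-GPU runs as well.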
def is_pytorch_1_1_0_or_later():
return [int(_) for _ in torch.__version__.split("+")[0].split(".")[:3]] >= [1, 1, 0]
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
"""Utilities for logging."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import deque
import logging
import numpy as np
import sys
import os
from lib.configs.config import cfg
def log_stats(stats, args):
    """Log training statistics to terminal"""
    logger = logging.getLogger(__name__)
lines = "[Step %d/%d] [Epoch %d/%d] [%s]\n" % (
stats['iter'], cfg.TRAIN.MAX_ITER, stats['epoch'], args.epoch, args.dataset)
lines += "\t\tloss: %.3f, time: %.6f, eta: %s\n" % (
stats['total_loss'], stats['time'], stats['eta'])
for k in stats:
if 'loss' in k and 'total_loss' not in k:
lines += "\t\t" + ", ".join("%s: %.3f" % (k, v) for k, v in stats[k].items()) + ", "
# validate criteria
lines += "\t\t" + ", ".join("%s: %.6f" % (k, v) for k, v in stats['val_err'].items()) + ", "
lines += '\n'
# lr in different groups
lines += "\t\t" + ", ".join("%s: %.6f" % (k, v) for k, v in stats['lr'].items()) + ", "
lines += '\n'
    logger.info(lines[:-1])  # remove last newline
class SmoothedValue(object):
"""Track a series of values and provide access to smoothed values over a
window or the global series average.
"""
def __init__(self, window_size):
self.deque = deque(maxlen=window_size)
self.series = []
self.total = 0.0
self.count = 0
def AddValue(self, value, size=1):
self.deque.append(value)
self.series.append(value)
self.count += size
self.total += value
def GetMedianValue(self):
return np.median(self.deque)
def GetAverageValue(self):
return np.mean(self.deque)
def GetGlobalAverageValue(self):
return self.total / self.count
def ClearValue(self):
self.count = 0
self.total = 0
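# Illustrative usage sketch (added note): SmoothedValue keeps a bounded window for
# median/mean smoothing plus running totals for a global average, e.g.:
#   v = SmoothedValue(window_size=3)
#   for x in (1.0, 2.0, 9.0, 4.0):
#       v.AddValue(x)
#   v.GetMedianValue()         # median of the last 3 values -> 4.0
#   v.GetGlobalAverageValue()  # (1 + 2 + 9 + 4) / 4 -> 4.0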
def setup_logging(name):
FORMAT = '%(levelname)s %(filename)s:%(lineno)4d: %(message)s'
# Manually clear root loggers to prevent any module that may have called
# logging.basicConfig() from blocking our logging setup
logging.root.handlers = []
logging.basicConfig(level=logging.INFO, format=FORMAT, stream=sys.stdout)
logger = logging.getLogger(name)
return logger
def setup_distributed_logger(name, save_dir, distributed_rank, filename="log.txt"):
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
# don't log results for the non-master process
if distributed_rank > 0:
return logger
ch = logging.StreamHandler(stream=sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter("%(asctime)s %(name)s %(levelname)s: %(message)s")
ch.setFormatter(formatter)
logger.addHandler(ch)
if save_dir:
fh = logging.FileHandler(os.path.join(save_dir, filename))
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
return logger
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import torch
from bisect import bisect_right
def make_lr_scheduler(cfg, optimizer):
return WarmupMultiStepLR(
optimizer,
cfg.TRAIN.LR_SCHEDULER_MULTISTEPS,
cfg.TRAIN.LR_SCHEDULER_GAMMA,
warmup_factor=cfg.TRAIN.WARMUP_FACTOR,
warmup_iters=cfg.TRAIN.WARMUP_ITERS,
warmup_method=cfg.TRAIN.WARMUP_METHOD,
)
class WarmupMultiStepLR(torch.optim.lr_scheduler._LRScheduler):
def __init__(
self,
optimizer,
milestones,
gamma=0.1,
warmup_factor=1.0 / 3,
warmup_iters=500,
warmup_method="linear",
last_epoch=-1,
):
        if list(milestones) != sorted(milestones):
            raise ValueError(
                "Milestones should be a list of increasing integers. "
                "Got {}".format(milestones)
            )
if warmup_method not in ("constant", "linear"):
raise ValueError(
"Only 'constant' or 'linear' warmup_method accepted"
"got {}".format(warmup_method)
)
self.milestones = milestones
self.gamma = gamma
self.warmup_factor = warmup_factor
self.warmup_iters = warmup_iters
self.warmup_method = warmup_method
super(WarmupMultiStepLR, self).__init__(optimizer, last_epoch)
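    # Illustrative worked example (added note, values taken from the TRAIN defaults
    # above: base_lr=0.001, gamma=0.1, milestones=[30000, 120000, 200000],
    # warmup_iters=500, warmup_factor=1/3, linear warmup). get_lr() evaluates to:
    #   iter 0:      0.001 * (1/3)              ~ 3.3e-4
    #   iter 250:    0.001 * (1/3 * 0.5 + 0.5)  ~ 6.7e-4   (alpha = 0.5)
    #   iter 500:    0.001                                  (warmup finished)
    #   iter 30000:  0.001 * 0.1 = 1e-4                     (first milestone hit)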
def get_lr(self):
warmup_factor = 1
if self.last_epoch < self.warmup_iters:
if self.warmup_method == "constant":
warmup_factor = self.warmup_factor
elif self.warmup_method == "linear":
alpha = float(self.last_epoch) / self.warmup_iters
warmup_factor = self.warmup_factor * (1 - alpha) + alpha
return [
base_lr
* warmup_factor
* self.gamma ** bisect_right(self.milestones, self.last_epoch)
for base_lr in self.base_lrs
] | {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import time
class Timer(object):
"""A simple timer."""
def __init__(self):
self.reset()
def tic(self):
        # using time.time instead of time.clock because time.clock
        # does not normalize for multithreading
self.start_time = time.time()
def toc(self, average=True):
self.diff = time.time() - self.start_time
self.total_time += self.diff
self.calls += 1
self.average_time = self.total_time / self.calls
if average:
return self.average_time
else:
return self.diff
def reset(self):
self.total_time = 0.
self.calls = 0
self.start_time = 0.
self.diff = 0.
self.average_time = 0.
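# Illustrative usage sketch (added note; do_work is a hypothetical workload): each
# toc() call accumulates the time elapsed since the last tic() and updates the
# running average:
#   t = Timer()
#   for _ in range(10):
#       t.tic()
#       do_work()
#       t.toc(average=False)   # seconds for this iteration
#   t.average_time             # mean seconds per iteration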
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
"""A simple attribute dictionary used for representing configuration options."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
class AttrDict(dict):
IMMUTABLE = '__immutable__'
def __init__(self, *args, **kwargs):
super(AttrDict, self).__init__(*args, **kwargs)
self.__dict__[AttrDict.IMMUTABLE] = False
def __getattr__(self, name):
if name in self.__dict__:
return self.__dict__[name]
elif name in self:
return self[name]
else:
raise AttributeError(name)
def __setattr__(self, name, value):
if not self.__dict__[AttrDict.IMMUTABLE]:
if name in self.__dict__:
self.__dict__[name] = value
else:
self[name] = value
else:
raise AttributeError(
'Attempted to set "{}" to "{}", but AttrDict is immutable'.
format(name, value)
)
def immutable(self, is_immutable):
"""Set immutability to is_immutable and recursively apply the setting
to all nested AttrDicts.
"""
self.__dict__[AttrDict.IMMUTABLE] = is_immutable
# Recursively set immutable state
for v in self.__dict__.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
for v in self.values():
if isinstance(v, AttrDict):
v.immutable(is_immutable)
def is_immutable(self):
return self.__dict__[AttrDict.IMMUTABLE]
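# Illustrative usage sketch (added note): keys are readable/writable as attributes
# until the dict is frozen:
#   d = AttrDict()
#   d.LR = 0.001        # same as d['LR'] = 0.001
#   d.immutable(True)
#   d.LR = 0.01         # raises AttributeError while immutable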
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import logging
import torch
import numpy as np
logger = logging.getLogger(__name__)
def recover_metric_depth(pred, gt, mask0=None):
if type(pred).__module__ == torch.__name__:
pred = pred.cpu().numpy()
if type(gt).__module__ == torch.__name__:
gt = gt.cpu().numpy()
gt = gt.squeeze()
pred = pred.squeeze()
mask = (gt > 1e-8) #& (pred > 1e-8)
if mask0 is not None and mask0.sum() > 0:
if type(mask0).__module__ == torch.__name__:
mask0 = mask0.cpu().numpy()
mask0 = mask0.squeeze()
mask0 = mask0 > 0
mask = mask & mask0
gt_mask = gt[mask]
pred_mask = pred[mask]
a, b = np.polyfit(pred_mask, gt_mask, deg=1)
if a > 0:
pred_metric = a * pred + b
else:
pred_mean = np.mean(pred_mask)
gt_mean = np.mean(gt_mask)
pred_metric = pred * (gt_mean / pred_mean)
return pred_metric
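# Added note: np.polyfit(pred_mask, gt_mask, deg=1) is a least-squares fit of
# gt ~ a * pred + b over valid pixels, i.e. a global scale-and-shift alignment of
# the relative prediction to metric ground truth. Illustrative example: if
# gt == 2.0 * pred + 0.5 exactly, then a ~ 2.0, b ~ 0.5 and pred_metric ~ gt.
# The mean-ratio fallback is used only when the fitted scale is non-positive.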
def validate_rel_depth_err(pred, gt, smoothed_criteria, mask=None, scale=10.):
if type(pred).__module__ == torch.__name__:
pred = pred.cpu().numpy()
if type(gt).__module__ == torch.__name__:
gt = gt.cpu().numpy()
gt = np.squeeze(gt)
pred = np.squeeze(pred)
if mask is not None:
gt = gt[mask[0]:mask[1], mask[2]:mask[3]]
pred = pred[mask[0]:mask[1], mask[2]:mask[3]]
if pred.shape != gt.shape:
        logger.info('The shapes of pred and gt are not the same!')
return -1
mask2 = gt > 0
gt = gt[mask2]
pred = pred[mask2]
# invalid evaluation image
if gt.size < 10:
return smoothed_criteria
# Scale matching
#pred = recover_metric_depth(pred, gt)
n_pxl = gt.size
gt_scale = gt * scale
pred_scale = pred * scale
# Mean Absolute Relative Error
rel = np.abs(gt_scale - pred_scale) / gt_scale # compute errors
abs_rel_sum = np.sum(rel)
smoothed_criteria['err_absRel'].AddValue(np.float64(abs_rel_sum), n_pxl)
# WHDR error
whdr_err_sum, eval_num = weighted_human_disagreement_rate(gt_scale, pred_scale)
smoothed_criteria['err_whdr'].AddValue(np.float64(whdr_err_sum), eval_num)
return smoothed_criteria
def evaluate_rel_err(pred, gt, smoothed_criteria, mask_invalid=None, scale=10.0 ):
if type(pred).__module__ != np.__name__:
pred = pred.cpu().numpy()
if type(gt).__module__ != np.__name__:
gt = gt.cpu().numpy()
pred = np.squeeze(pred)
gt = np.squeeze(gt)
if pred.shape != gt.shape:
        logger.info('The shapes of pred and gt are not the same!')
return -1
if mask_invalid is not None:
gt = gt[~mask_invalid]
pred = pred[~mask_invalid]
mask = (gt > 1e-9) & (pred > 1e-9)
gt = gt[mask]
pred = pred[mask]
n_pxl = gt.size
gt_scale = gt * scale
pred_scale = pred * scale
# invalid evaluation image
if gt_scale.size < 10:
        logger.info('Valid pixel size: %d; invalid evaluation image, skipping.', gt_scale.size)
return smoothed_criteria
    # Mean Absolute Relative Error
    rel = np.abs(gt - pred) / gt  # compute errors
    abs_rel_sum = np.sum(rel)
    smoothed_criteria['err_absRel'].AddValue(np.float64(abs_rel_sum), n_pxl)
    # Square Mean Relative Error
    s_rel = ((gt_scale - pred_scale) * (gt_scale - pred_scale)) / (gt_scale * gt_scale)  # compute errors
    squa_rel_sum = np.sum(s_rel)
    smoothed_criteria['err_squaRel'].AddValue(np.float64(squa_rel_sum), n_pxl)
    # Root Mean Square error
    square = (gt_scale - pred_scale) ** 2
    rms_squa_sum = np.sum(square)
    smoothed_criteria['err_rms'].AddValue(np.float64(rms_squa_sum), n_pxl)
    # Log Root Mean Square error
    log_square = (np.log(gt_scale) - np.log(pred_scale)) ** 2
    log_rms_sum = np.sum(log_square)
    smoothed_criteria['err_logRms'].AddValue(np.float64(log_rms_sum), n_pxl)
    # Scale invariant error
    diff_log = np.log(pred_scale) - np.log(gt_scale)
    diff_log_sum = np.sum(diff_log)
    smoothed_criteria['err_silog'].AddValue(np.float64(diff_log_sum), n_pxl)
    diff_log_2 = diff_log ** 2
    diff_log_2_sum = np.sum(diff_log_2)
    smoothed_criteria['err_silog2'].AddValue(np.float64(diff_log_2_sum), n_pxl)
    # Mean log10 error
    log10_sum = np.sum(np.abs(np.log10(gt) - np.log10(pred)))
    smoothed_criteria['err_log10'].AddValue(np.float64(log10_sum), n_pxl)
    # Delta (threshold accuracy)
gt_pred = gt_scale / pred_scale
pred_gt = pred_scale / gt_scale
gt_pred = np.reshape(gt_pred, (1, -1))
pred_gt = np.reshape(pred_gt, (1, -1))
gt_pred_gt = np.concatenate((gt_pred, pred_gt), axis=0)
ratio_max = np.amax(gt_pred_gt, axis=0)
delta_1_sum = np.sum(ratio_max < 1.25)
smoothed_criteria['err_delta1'].AddValue(np.float64(delta_1_sum), n_pxl)
delta_2_sum = np.sum(ratio_max < 1.25**2)
smoothed_criteria['err_delta2'].AddValue(np.float64(delta_2_sum), n_pxl)
delta_3_sum = np.sum(ratio_max < 1.25**3)
smoothed_criteria['err_delta3'].AddValue(np.float64(delta_3_sum), n_pxl)
# WHDR error
whdr_err_sum, eval_num = weighted_human_disagreement_rate(gt_scale, pred_scale)
smoothed_criteria['err_whdr'].AddValue(np.float64(whdr_err_sum), eval_num)
return smoothed_criteria
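# Added note on the delta metrics above: for each pixel the ratio
# max(gt / pred, pred / gt) is compared against 1.25**k. Illustrative example:
# a pixel with gt = 2.0 and pred = 2.4 has ratio 1.2 < 1.25, so it counts toward
# delta1 (and therefore delta2 and delta3 as well).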
def weighted_human_disagreement_rate(gt, pred):
p12_index = select_index(gt)
gt_reshape = np.reshape(gt, gt.size)
pred_reshape = np.reshape(pred, pred.size)
mask = gt > 0
gt_p1 = gt_reshape[mask][p12_index['p1']]
gt_p2 = gt_reshape[mask][p12_index['p2']]
pred_p1 = pred_reshape[mask][p12_index['p1']]
pred_p2 = pred_reshape[mask][p12_index['p2']]
p12_rank_gt = np.zeros_like(gt_p1)
p12_rank_gt[gt_p1 > gt_p2] = 1
p12_rank_gt[gt_p1 < gt_p2] = -1
p12_rank_pred = np.zeros_like(gt_p1)
p12_rank_pred[pred_p1 > pred_p2] = 1
p12_rank_pred[pred_p1 < pred_p2] = -1
err = np.sum(p12_rank_gt != p12_rank_pred)
valid_pixels = gt_p1.size
return err, valid_pixels
def select_index(gt_depth, select_size=10000):
valid_size = np.sum(gt_depth>0)
    try:
        p = np.random.choice(valid_size, select_size * 2, replace=False)
    except ValueError:
        # not enough valid pixels to sample without replacement
        p = np.random.choice(valid_size, select_size * 2 * 2, replace=True)
np.random.shuffle(p)
p1 = p[0:select_size*2:2]
p2 = p[1:select_size*2:2]
p12_index = {'p1': p1, 'p2': p2}
return p12_index
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |
import importlib
import math
import logging
import numpy as np
import torch.utils.data
import torch.distributed as dist
logger = logging.getLogger(__name__)
# class MultipleDataLoaderDistributed():
# def __init__(self, opt, sample_ratio=0.1):
# self.opt = opt
# self.multi_datasets, self.dataset_indices_list = create_multiple_dataset(opt)
# self.datasizes = [len(dataset) for dataset in self.multi_datasets]
# self.merged_dataset = torch.utils.data.ConcatDataset(self.multi_datasets)
# #self.custom_multi_sampler_dist = CustomerMultiDataSamples(self.dataset_indices_list, sample_ratio, self.opt)
# self.custom_multi_sampler_dist = CustomerMultiDataSampler(opt, self.merged_dataset, opt.world_size, opt.phase)
# self.curr_sample_size = self.custom_multi_sampler_dist.num_samples
# self.dist_sample_size = self.custom_multi_sampler_dist.num_dist_samples
# self.dataloader = torch.utils.data.DataLoader(
# dataset=self.merged_dataset,
# batch_size=opt.batchsize,
# num_workers=opt.thread,
# sampler=self.custom_multi_sampler_dist,
# drop_last=True)
#
# def load_data(self):
# return self
#
# def __len__(self):
# return np.sum(self.datasizes)
#
# def __iter__(self):
# for i, data in enumerate(self.dataloader):
# if i * self.opt.batchsize >= float("inf"):
# break
# yield data
def MultipleDataLoaderDistributed(opt, sample_ratio=1):
#multi_datasets, dataset_indices_list = create_multiple_dataset(opt)
multi_datasets = create_multiple_dataset(opt)
#multi_datasizes = [len(dataset) for dataset in multi_datasets]
merged_dataset = torch.utils.data.ConcatDataset(multi_datasets)
#custom_multi_sampler_dist = CustomerMultiDataSamples(dataset_indices_list, sample_ratio, opt)
custom_multi_sampler_dist = CustomerMultiDataSampler(opt, merged_dataset, opt.world_size, opt.phase)
curr_sample_size = custom_multi_sampler_dist.num_samples
dist_sample_size = custom_multi_sampler_dist.num_dist_samples
dataloader = torch.utils.data.DataLoader(
dataset=merged_dataset,
batch_size=opt.batchsize,
num_workers=opt.thread,
sampler=custom_multi_sampler_dist,
drop_last=True)
return dataloader, curr_sample_size
class CustomerMultiDataSampler(torch.utils.data.Sampler):
"""
Construct a sample method. Sample former ratio_samples of datasets randomly.
"""
def __init__(self, args, multi_dataset, num_replicas, split, sample_ratio=1.0):
self.args = args
self.num_replicas = num_replicas
self.phase = split
self.rank = args.global_rank
#self.logger = logger
self.multi_dataset = multi_dataset
self.create_samplers()
self.num_indices = np.array([len(i) for i in self.extended_indices_list])
#self.num_samples = self.num_indices.astype(np.uint32)
self.num_samples = (self.num_indices * sample_ratio).astype(np.uint32)
self.max_indices = np.array([max(i) for i in self.extended_indices_list])
self.total_sampled_size = np.sum(self.num_samples)
self.num_dist_samples = int(
math.ceil(self.total_sampled_size * 1.0 / self.num_replicas)
)
self.total_dist_size = self.num_dist_samples * self.num_replicas
logstr = ",".join(["%s sampled data size: %d" % (args.dataset_list[i], self.num_samples[i])
for i in range(self.num_indices.size)]
)
logger.info(logstr)
def __iter__(self):
self.create_samplers()
cum_sum = np.cumsum(np.append([0], self.max_indices))
indices_array = [[self.extended_indices_list[data_i][i] + cum_sum[data_i]
for i in range(int(num))]for data_i, num in enumerate(self.num_samples)]
if "train" in self.phase:
# data list is mapped to the order [A, B, C, A, B, C....]
indices_array = np.array(indices_array).transpose(1, 0).reshape(-1)
else:
indices_array = np.concatenate(indices_array[:])
# add extra samples to make it evenly divisible
diff_size = int(self.total_dist_size - self.total_sampled_size)
if diff_size > 0:
extended_indices_dist = np.append(indices_array, indices_array[:diff_size])
else:
extended_indices_dist = indices_array
assert extended_indices_dist.size == self.total_dist_size
# subsample
offset = self.num_dist_samples * self.rank
rank_indices = extended_indices_dist[offset : offset + self.num_dist_samples]
assert rank_indices.size == self.num_dist_samples
        for idx in rank_indices:
            yield idx
def __len__(self):
return self.total_sampled_size
def create_samplers(self):
self.extended_indices_list = []
dataset_indices_lists = []
indices_len = []
datasets_num = len(self.multi_dataset.datasets)
for dataset_i in self.multi_dataset.datasets:
# The list of indices of each dataset
dataset_indices_lists.append(np.random.permutation(np.arange(len(dataset_i.curriculum_list))))
indices_len.append(len(dataset_i.curriculum_list))
# the max size of all datasets
max_len = np.max(indices_len)
if "train" == self.phase:
for data_list in dataset_indices_lists:
cp = max_len // data_list.size
size_i = data_list.size
tmp = data_list
for i in range(cp-1):
tmp = np.concatenate((tmp, np.random.permutation(data_list)), axis=None)
tmp = np.concatenate((tmp, np.random.choice(data_list, max_len % size_i, replace=False)), axis=None)
self.extended_indices_list.append(list(tmp))
else:
self.extended_indices_list = dataset_indices_lists
logstr = "\n".join(["Split %s, %s: %d -(extend to)-> %d" %
(self.phase, self.args.dataset_list[i], len(dataset_indices_lists[i]),
len(self.extended_indices_list[i]))
for i in range(datasets_num)]
)
logger.info(logstr)
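# Illustrative note (added): with three datasets extended to equal length, the
# transpose/reshape in __iter__ interleaves indices round-robin, e.g.
# [[a0, a1], [b0, b1], [c0, c1]] -> a0, b0, c0, a1, b1, c1, so consecutive
# training batches mix samples from every dataset.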
# class CustomerMultiDataSamples(torch.utils.data.Sampler):
# """
# Construct a sample method. Sample former ratio_samples of datasets randomly.
# """
# def __init__(self, multi_data_indices, ratio_samples, opt, rank=None, num_replicas=None):
# logger = logging.getLogger(__name__)
# self.multi_data_indices = multi_data_indices
# self.num_indices = np.array([len(i) for i in self.multi_data_indices])
# self.num_samples = (self.num_indices * ratio_samples).astype(np.uint32)
# self.max_indices = np.array([max(i) for i in self.multi_data_indices])
# self.total_sampled_size = np.sum(self.num_samples)
# self.phase = opt.phase
# if num_replicas is None:
# if not dist.is_available():
# raise RuntimeError("Requires distributed package to be available")
# num_replicas = dist.get_world_size()
# self.num_replicas = num_replicas
# if rank is None:
# if not dist.is_available():
# raise RuntimeError("Requires distributed package to be available")
# rank = dist.get_rank()
# self.rank = rank
# self.num_dist_samples = int(math.ceil(self.total_sampled_size * 1.0 / self.num_replicas))
# self.total_dist_size = self.num_dist_samples * self.num_replicas
# logger.info('Sample %02f, sampled dataset sizes are %s' % (ratio_samples, ','.join(map(str, self.num_samples))))
# def __iter__(self):
# cum_sum = np.cumsum(np.append([0], self.max_indices))
# if 'train' in self.phase:
# indices_array = [[self.multi_data_indices[idx][i] + cum_sum[idx] for i in torch.randperm(int(num))] for
# idx, num in
# enumerate(self.num_samples)]
# else:
# indices_array = [[self.multi_data_indices[idx][i] + cum_sum[idx] for i in range(int(num))] for
# idx, num in enumerate(self.num_samples)]
# if 'train' in self.phase:
# # data list is reshaped in [A, B, C, A, B, C....]
# indices_array = np.array(indices_array).transpose(1, 0).reshape(-1)
# else:
# indices_array = np.concatenate(indices_array[:])
# # add extra samples to make it evenly divisible
# diff_size = int(self.total_dist_size - self.total_sampled_size)
# if diff_size > 0:
# extended_indices_dist = np.append(indices_array, indices_array[:diff_size])
# else:
# extended_indices_dist = indices_array
# assert extended_indices_dist.size == self.total_dist_size
# # subsample
# offset = self.num_dist_samples * self.rank
# rank_indices = extended_indices_dist[offset: offset + self.num_dist_samples]
# assert rank_indices.size == self.num_dist_samples
# return iter(rank_indices)
# def create_dataset(opt):
# logger = logging.getLogger(__name__)
# dataset = find_dataset_lib(opt.dataset)()
# dataset.initialize(opt)
# logger.info("%s is created." % opt.dataset)
# return dataset
def create_multiple_dataset(opt):
all_datasets = []
dataset_indices_lists = []
indices_len = []
for name in opt.dataset_list:
dataset = find_dataset_lib(opt.dataset)(opt, name)
#dataset.initialize(opt, name)
logger.info("%s : %s is loaded, the data size is %d" % (opt.phase, name, len(dataset)))
all_datasets.append(dataset)
assert dataset.curriculum_list is not None, "Curriculum is None!!!"
dataset_indices_lists.append(dataset.curriculum_list)
indices_len.append(len(dataset.curriculum_list))
assert len(dataset.curriculum_list) == dataset.data_size, "Curriculum list size not equal the data size!!!"
max_len = np.max(indices_len)
# if 'train' in opt.phase:
# extended_indices_list = [i * (max_len // len(i)) + list(np.random.choice(i, max_len % len(i), replace=False)) for i in dataset_indices_lists]
# #extended_indices_list = [i + list(np.random.choice(i, max_len-len(i))) for i in dataset_indices_lists]
# else:
# extended_indices_list = dataset_indices_lists
logger.info("%s are merged!" % opt.dataset_list)
return all_datasets#, extended_indices_list
def find_dataset_lib(dataset_name):
"""
Give the option --dataset [datasetname], import "data/datasetname_dataset.py"
:param dataset_name: --dataset
:return: "data/datasetname_dataset.py"
"""
logger = logging.getLogger(__name__)
dataset_filename = "data." + dataset_name + "_dataset"
datasetlib = importlib.import_module(dataset_filename)
dataset = None
target_dataset_name = dataset_name.replace('_', '') + 'dataset'
for name, cls in datasetlib.__dict__.items():
if name.lower() == target_dataset_name.lower():
dataset = cls
if dataset is None:
logger.info("In %s.py, there should be a class name that matches %s in lowercase." % (
dataset_filename, target_dataset_name))
        exit(1)  # exit with a non-zero status: the expected dataset class was not found
return dataset
| {
"repo_name": "aim-uofa/AdelaiDepth",
"stars": "926",
"repo_language": "Python",
"file_name": "train.py",
"mime_type": "text/x-script.python"
} |