Merge remote-tracking branch 'refs/remotes/origin/master'

main
Fabien Benureau 9 years ago
commit 8afb7a4777

@ -5,10 +5,12 @@ import re
from .p3 import ustr, uchr
# citekey stuff
# Citekey stuff
TYPE_KEY = 'type'
CONTROL_CHARS = ''.join(map(uchr, list(range(0, 32)) + list(range(127, 160))))
CITEKEY_FORBIDDEN_CHARS = '@\'\\,#}{~%/' # '/' is OK for bibtex but forbidden
CITEKEY_FORBIDDEN_CHARS = '@\'\\,#}{~%/ ' # '/' is OK for bibtex but forbidden
# here since we transform citekeys into filenames
CITEKEY_EXCLUDE_RE = re.compile('[%s]'
% re.escape(CONTROL_CHARS + CITEKEY_FORBIDDEN_CHARS))
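Note: the space added to CITEKEY_FORBIDDEN_CHARS above means whitespace is now stripped as well. A minimal hedged sketch of how this exclusion regex sanitizes a citekey into a safe filename; only re and the constants above are assumed, and sanitize is a hypothetical helper, not part of the diff:

import re

# chr stands in here for the uchr py2/py3 helper imported above
CONTROL_CHARS = ''.join(map(chr, list(range(0, 32)) + list(range(127, 160))))
CITEKEY_FORBIDDEN_CHARS = '@\'\\,#}{~%/ '  # '/' and ' ' are unsafe in filenames
CITEKEY_EXCLUDE_RE = re.compile('[%s]'
                                % re.escape(CONTROL_CHARS + CITEKEY_FORBIDDEN_CHARS))

def sanitize(citekey):
    """Drop characters that cannot appear in a citekey used as a filename."""
    return CITEKEY_EXCLUDE_RE.sub('', citekey)

print(sanitize("Zou 2013@draft"))  # -> Zou2013draft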
@ -73,24 +75,22 @@ def extract_docfile(bibdata, remove=False):
:param remove: remove field after extracting information (default: False)
"""
citekey, entry = get_entry(bibdata)
try:
if 'file' in entry:
field = entry['file']
if 'file' in bibdata:
field = bibdata['file']
# Check if this is Mendeley specific
for f in field.split(':'):
if len(f) > 0:
break
if remove:
entry.pop('file')
bibdata.pop('file')
# This is a hack for Mendeley. Make this clean
if f[0] != '/':
f = '/' + f
return f
if 'attachments' in entry:
return entry['attachments']
if 'pdf' in entry:
return entry['pdf']
if 'attachments' in bibdata:
return bibdata['attachments']
if 'pdf' in bibdata:
return bibdata['pdf']
except (KeyError, IndexError):
return None
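For reference, Mendeley typically exports the file field as something like ':home/user/paper.pdf:pdf', which is why the code above splits on ':' and re-adds the leading slash. A self-contained hedged sketch of just that logic; docfile_from_mendeley_field is a hypothetical helper, not part of pubs:

def docfile_from_mendeley_field(field):
    """Recover a usable path from a Mendeley-style 'file' field."""
    f = None
    for chunk in field.split(':'):
        if len(chunk) > 0:   # keep the first non-empty chunk
            f = chunk
            break
    if f is None:
        return None
    if f[0] != '/':          # Mendeley drops the leading slash
        f = '/' + f
    return f

print(docfile_from_mendeley_field(':home/user/papers/pagerank.pdf:pdf'))
# -> /home/user/papers/pagerank.pdf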

@ -12,7 +12,7 @@ from .. import pretty
def parser(subparsers):
parser = subparsers.add_parser('add', help='add a paper to the repository')
parser.add_argument('bibfile', nargs='?', default = None,
parser.add_argument('bibfile', nargs='?', default=None,
help='bibtex file')
parser.add_argument('-D', '--doi', help='doi number to retrieve the bibtex entry, if it is not provided', default=None)
parser.add_argument('-I', '--isbn', help='isbn number to retrieve the bibtex entry, if it is not provided', default=None)
@ -23,10 +23,12 @@ def parser(subparsers):
default=None)
parser.add_argument('-L', '--link', action='store_false', dest='copy', default=True,
help="don't copy document files, just create a link.")
parser.add_argument('-M', '--move', action='store_true', dest='move', default=False,
help="move document instead of of copying (ignored if --link).")
return parser
def bibdata_from_editor(ui, rp):
def bibentry_from_editor(ui, rp):
again = True
bibstr = templates.add_bib
while again:
@ -41,8 +43,8 @@ def bibdata_from_editor(ui, rp):
if not again:
ui.exit(0)
else:
bibdata = rp.databroker.verify(bibstr)
bibstruct.verify_bibdata(bibdata)
bibentry = rp.databroker.verify(bibstr)
bibstruct.verify_bibdata(bibentry)
# REFACTOR Generate citekey
again = False
except ValueError:
@ -52,7 +54,8 @@ def bibdata_from_editor(ui, rp):
if not again:
ui.exit(0)
return bibdata
return bibentry
def command(args):
"""
@ -64,47 +67,47 @@ def command(args):
bibfile = args.bibfile
docfile = args.docfile
tags = args.tags
citekey = args.copy
citekey = args.citekey
rp = repo.Repository(config())
# get bibtex entry
if bibfile is None:
if args.doi is None and args.isbn is None:
bibdata = bibdata_from_editor(ui, rp)
bibentry = bibentry_from_editor(ui, rp)
else:
if args.doi is not None:
bibdata_raw = apis.doi2bibtex(args.doi)
bibdata = rp.databroker.verify(bibdata_raw)
if bibdata is None:
bibentry_raw = apis.doi2bibtex(args.doi)
bibentry = rp.databroker.verify(bibentry_raw)
if bibentry is None:
ui.error('invalid doi {} or unable to retrieve bibfile from it.'.format(args.doi))
if args.isbn is None:
ui.exit(1)
if args.isbn is not None:
bibdata_raw = apis.isbn2bibtex(args.isbn)
bibdata = rp.databroker.verify(bibdata_raw)
if bibdata is None:
bibentry_raw = apis.isbn2bibtex(args.isbn)
bibentry = rp.databroker.verify(bibentry_raw)
if bibentry is None:
ui.error('invalid isbn {} or unable to retrieve bibfile from it.'.format(args.isbn))
ui.exit(1)
# TODO distinguish between cases, offer to open the error page in a webbrowser.
# TODO offer to confirm/change citekey
else:
bibdata_raw = content.get_content(bibfile, ui=ui)
bibdata = rp.databroker.verify(bibdata_raw)
if bibdata is None:
bibentry_raw = content.get_content(bibfile, ui=ui)
bibentry = rp.databroker.verify(bibentry_raw)
if bibentry is None:
ui.error('invalid bibfile {}.'.format(bibfile))
# citekey
citekey = args.citekey
if citekey is None:
base_key = bibstruct.extract_citekey(bibdata)
base_key = bibstruct.extract_citekey(bibentry)
citekey = rp.unique_citekey(base_key)
elif citekey in rp:
ui.error('citekey already exist {}.'.format(citekey))
ui.exit(1)
p = paper.Paper(bibdata, citekey=citekey)
p = paper.Paper.from_bibentry(bibentry, citekey=citekey)
# tags
@ -113,7 +116,7 @@ def command(args):
# document file
bib_docfile = bibstruct.extract_docfile(bibdata)
bib_docfile = bibstruct.extract_docfile(bibentry)
if docfile is None:
docfile = bib_docfile
elif bib_docfile is not None:
@ -127,7 +130,9 @@ def command(args):
if docfile is not None:
rp.push_doc(p.citekey, docfile, copy=args.copy)
if args.copy:
if ui.input_yn('{} has been copied into pubs; should the original be removed?'.format(color.dye(docfile, color.bold))):
if args.move:
content.remove_file(docfile)
elif ui.input_yn('{} has been copied into pubs; should the original be removed?'.format(color.dye(docfile, color.bold))):
content.remove_file(docfile)
ui.print_('added to pubs:\n{}'.format(pretty.paper_oneliner(p)))
except ValueError as v:
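The new document handling in the add command therefore reads: with --move the original is always removed, with a plain copy the user is asked, with --link nothing is touched. A tiny hedged sketch of that decision, written independently of the pubs internals (all names below are hypothetical):

def handle_original(docfile, copy, move, ask_user, remove_file):
    """Decide what happens to the original document after it has been pushed."""
    if not copy:      # --link: only a link was created, keep the original
        return
    if move:          # --move: the copy replaces the original
        remove_file(docfile)
    elif ask_user('{} has been copied into pubs; '
                  'should the original be removed?'.format(docfile)):
        remove_file(docfile)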

@ -11,6 +11,8 @@ def parser(subparsers):
# help="copy document files into library directory (default)")
parser.add_argument('-L', '--link', action='store_false', dest='copy', default=True,
help="don't copy document files, just create a link.")
parser.add_argument('-M', '--move', action='store_true', dest='move', default=False,
help="move document instead of of copying (ignored if --link).")
parser.add_argument('citekey',
help='citekey of the paper')
parser.add_argument('document',
@ -32,9 +34,9 @@ def command(args):
try:
document = args.document
rp.push_doc(paper.citekey, document, copy=args.copy)
if args.copy:
if ui.input_yn('{} has been copied into pubs; should the original be removed?'.format(color.dye(document, color.bold))):
content.remove_file(document)
if args.copy and args.move:
content.remove_file(document)
ui.print_('{} attached to {}'.format(color.dye(document, color.bold), color.dye(paper.citekey, color.citekey)))
except ValueError as v:

@ -5,6 +5,7 @@ from ..uis import get_ui
from ..endecoder import EnDecoder
from ..utils import resolve_citekey
def parser(subparsers):
parser = subparsers.add_parser('edit',
help='open the paper bibliographic file in an editor')
@ -34,7 +35,7 @@ def command(args):
encode = coder.encode_bibdata
decode = coder.decode_bibdata
suffix = '.bib'
raw_content = encode(paper.bibdata)
raw_content = encode(paper.bibentry)
while True:
# Get new content from user
@ -44,10 +45,11 @@ def command(args):
content = decode(raw_content)
if meta:
new_paper = Paper(paper.bibdata, citekey=paper.citekey,
new_paper = Paper(paper.citekey, paper.bibdata,
metadata=content)
else:
new_paper = Paper(content, metadata=paper.metadata)
new_paper = Paper.from_bibentry(content,
metadata=paper.metadata)
rp.rename_paper(new_paper, old_citekey=paper.citekey)
break

@ -33,7 +33,7 @@ def command(args):
papers = rp.all_papers()
bib = {}
for p in papers:
bib[p.citekey] = p.bibentry
bib[p.citekey] = p.bibdata
exporter = endecoder.EnDecoder()
bibdata_raw = exporter.encode_bibdata(bib)
ui.print_(bibdata_raw)

@ -49,12 +49,9 @@ def many_from_path(bibpath):
papers = {}
for b in biblist:
for k in b.keys():
for k, b in b.items():
try:
bibdata = {}
bibdata[k] = b[k]
papers[k] = Paper(bibdata, citekey=k)
papers[k] = Paper(k, b)
papers[k].added = datetime.datetime.now()
except ValueError as e:
papers[k] = e

@ -79,10 +79,10 @@ def _lower(s, lower=True):
def _check_author_match(paper, query, case_sensitive=False):
"""Only checks within last names."""
if not 'author' in paper.bibentry:
if not 'author' in paper.bibdata:
return False
return any([query in _lower(bibstruct.author_last(p), lower=(not case_sensitive))
for p in paper.bibentry['author']])
for p in paper.bibdata['author']])
@ -92,8 +92,8 @@ def _check_tag_match(paper, query, case_sensitive=False):
def _check_field_match(paper, field, query, case_sensitive=False):
return query in _lower(paper.bibentry[field],
lower=(not case_sensitive))
return query in _lower(paper.bibdata[field],
lower=(not case_sensitive))
def _check_query_block(paper, query_block, case_sensitive=None):
@ -106,7 +106,7 @@ def _check_query_block(paper, query_block, case_sensitive=None):
return _check_tag_match(paper, value, case_sensitive=case_sensitive)
elif field == 'author':
return _check_author_match(paper, value, case_sensitive=case_sensitive)
elif field in paper.bibentry:
elif field in paper.bibdata:
return _check_field_match(paper, field, value,
case_sensitive=case_sensitive)
else:

@ -1,7 +1,7 @@
import os
import collections
from .p3 import configparser, _read_config
from .p3 import configparser, ConfigParser, _read_config
from .content import check_file, _open
@ -49,7 +49,7 @@ class Config(object):
def __init__(self, **kwargs):
object.__setattr__(self, '_section', MAIN_SECTION) # active section
object.__setattr__(self, '_cfg', configparser.SafeConfigParser())
object.__setattr__(self, '_cfg', ConfigParser())
self._cfg.add_section(self._section)
for name, value in DFT_CONFIG.items():
@ -66,12 +66,12 @@ class Config(object):
if not check_file(path, fail=False):
raise IOError(("The configuration file {} does not exist."
" Did you run 'pubs init' ?").format(path))
with _open(path, 'rb+') as f:
with _open(path, 'r+') as f:
_read_config(self._cfg, f)
return self
def save(self, path=DFT_CONFIG_PATH):
with _open(path, 'wb+') as f:
with _open(path, 'w+') as f:
self._cfg.write(f)
def __setattr__(self, name, value):

@ -21,7 +21,7 @@ class DataBroker(object):
metadata_raw = self.filebroker.pull_metafile(citekey)
return self.endecoder.decode_metadata(metadata_raw)
def pull_bibdata(self, citekey):
def pull_bibentry(self, citekey):
bibdata_raw = self.filebroker.pull_bibfile(citekey)
return self.endecoder.decode_bibdata(bibdata_raw)
@ -29,7 +29,7 @@ class DataBroker(object):
metadata_raw = self.endecoder.encode_metadata(metadata)
self.filebroker.push_metafile(citekey, metadata_raw)
def push_bibdata(self, citekey, bibdata):
def push_bibentry(self, citekey, bibdata):
bibdata_raw = self.endecoder.encode_bibdata(bibdata)
self.filebroker.push_bibfile(citekey, bibdata_raw)

@ -31,14 +31,14 @@ class DataCache(object):
def pull_metadata(self, citekey):
return self.databroker.pull_metadata(citekey)
def pull_bibdata(self, citekey):
return self.databroker.pull_bibdata(citekey)
def pull_bibentry(self, citekey):
return self.databroker.pull_bibentry(citekey)
def push_metadata(self, citekey, metadata):
self.databroker.push_metadata(citekey, metadata)
def push_bibdata(self, citekey, bibdata):
self.databroker.push_bibdata(citekey, bibdata)
def push_bibentry(self, citekey, bibdata):
self.databroker.push_bibentry(citekey, bibdata)
def push(self, citekey, metadata, bibdata):
self.databroker.push(citekey, metadata, bibdata)

@ -12,14 +12,23 @@ except ImportError:
import yaml
from .bibstruct import TYPE_KEY
"""Important notice:
All functions and methods in this file assume and produce unicode data.
"""
if bp.__version__ > "0.6.0":
BP_ID_KEY = 'ID'
BP_ENTRYTYPE_KEY = 'ENTRYTYPE'
else:
BP_ID_KEY = 'id'
BP_ENTRYTYPE_KEY = 'type'
def sanitize_citekey(record):
record['id'] = record['id'].strip('\n')
record[BP_ID_KEY] = record[BP_ID_KEY].strip('\n')
return record
@ -44,8 +53,8 @@ def customizations(record):
return record
bibfield_order = ['author', 'title', 'journal', 'institution', 'publisher',
'year', 'month', 'number', 'pages', 'link', 'doi', 'id',
'note', 'abstract']
'year', 'month', 'number', 'pages', 'link', 'doi', 'note',
'abstract']
class EnDecoder(object):
@ -88,7 +97,7 @@ class EnDecoder(object):
@staticmethod
def _encode_bibentry(citekey, bibentry):
bibraw = '@{}{{{},\n'.format(bibentry['type'], citekey)
bibraw = '@{}{{{},\n'.format(bibentry[TYPE_KEY], citekey)
bibentry = copy.copy(bibentry)
for key in bibfield_order:
if key in bibentry:
@ -96,7 +105,7 @@ class EnDecoder(object):
bibraw += ' {} = {{{}}},\n'.format(
key, EnDecoder._encode_field(key, value))
for key, value in bibentry.items():
if key != 'type':
if key != TYPE_KEY:
bibraw += ' {} = {{{}}},\n'.format(
key, EnDecoder._encode_field(key, value))
bibraw += '}\n'
@ -107,9 +116,12 @@ class EnDecoder(object):
try:
entries = bp.bparser.BibTexParser(
bibdata, customization=customizations).get_entry_dict()
# Remove 'id' attribute which is stored as citekey
# Remove the bibtexparser id attribute, which is stored as the citekey
for e in entries:
entries[e].pop('id')
entries[e].pop(BP_ID_KEY)
# Convert bibtexparser entrytype key to internal 'type'
t = entries[e].pop(BP_ENTRYTYPE_KEY)
entries[e][TYPE_KEY] = t
if len(entries) > 0:
return entries
except Exception:
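To make the key mapping concrete, here is the shape decode_bibdata is expected to return after the conversion above: the bibtexparser citekey (ID/id) becomes the outer dictionary key, and ENTRYTYPE/type is renamed to the internal TYPE_KEY. The field values below are illustrative only:

# Hypothetical decoded entry (values illustrative):
bibentry = {
    'turing1950computing': {
        'type': 'article',    # internal TYPE_KEY, renamed from ENTRYTYPE/'type'
        'author': ['Turing, Alan'],
        'title': 'Computing machinery and intelligence',
        'year': '1950',
    }
}
# The bibtexparser ID is not kept inside the entry; it is the outer key.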

@ -3,7 +3,8 @@ import sys
if sys.version_info[0] == 2:
import ConfigParser as configparser
_read_config = configparser.SafeConfigParser.readfp
ConfigParser = configparser.SafeConfigParser
_read_config = ConfigParser.readfp
def input():
return raw_input().decode(sys.stdin.encoding or 'utf8', 'ignore')
@ -28,7 +29,8 @@ if sys.version_info[0] == 2:
else:
import configparser
_read_config = configparser.SafeConfigParser.read_file
ConfigParser = configparser.ConfigParser
_read_config = ConfigParser.read_file
ustr = str
uchr = chr
from urllib.parse import urlparse
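A minimal usage sketch of the unified ConfigParser/_read_config aliases on both Python 2 and 3, mirroring the text-mode 'r+'/'w+' change in Config.load/save above; the config path is hypothetical:

from pubs.p3 import ConfigParser, _read_config

cfg = ConfigParser()
cfg.add_section('pubs')
cfg.set('pubs', 'pubsdir', '~/pubs')

with open('/tmp/pubsrc', 'w+') as f:   # text mode, works on py2 and py3
    cfg.write(f)
with open('/tmp/pubsrc', 'r+') as f:
    _read_config(cfg, f)               # readfp on py2, read_file on py3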

@ -28,19 +28,11 @@ class Paper(object):
in a pythonic manner.
"""
def __init__(self, bibdata, citekey=None, metadata=None):
def __init__(self, citekey, bibdata, metadata=None):
self.citekey = citekey
self.metadata = _clean_metadata(metadata)
self.bibdata = bibdata
_, self.bibentry = bibstruct.get_entry(self.bibdata)
if self.citekey is None:
self.citekey = bibstruct.extract_citekey(self.bibdata)
bibstruct.check_citekey(self.citekey)
else:
def_citekey = bibstruct.extract_citekey(self.bibdata)
self.bibdata = {citekey: self.bibdata[def_citekey]}
bibstruct.check_citekey(self.citekey)
def __eq__(self, other):
return (isinstance(self, Paper) and type(other) is type(self)
@ -50,15 +42,15 @@ class Paper(object):
def __repr__(self):
return 'Paper(%s, %s, %s)' % (
self.citekey, self.bibentry, self.metadata)
self.citekey, self.bibdata, self.metadata)
def __deepcopy__(self, memo):
return Paper(citekey =self.citekey,
return Paper(citekey=self.citekey,
metadata=copy.deepcopy(self.metadata, memo),
bibdata=copy.deepcopy(self.bibdata, memo))
def __copy__(self):
return Paper(citekey =self.citekey,
return Paper(citekey=self.citekey,
metadata=self.metadata,
bibdata=self.bibdata)
@ -67,6 +59,10 @@ class Paper(object):
# docpath
@property
def bibentry(self):
return {self.citekey: self.bibdata}
@property
def docpath(self):
return self.metadata.get('docfile', '')
@ -105,3 +101,10 @@ class Paper(object):
@added.setter
def added(self, value):
self.metadata['added'] = value
@staticmethod
def from_bibentry(bibentry, citekey=None, metadata=None):
bibentry_key, bibdata = bibstruct.get_entry(bibentry)
if citekey is None:
citekey = bibentry_key
return Paper(citekey, bibdata, metadata=metadata)
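In short, a Paper is now built either directly from a citekey and a fields dict (bibdata), or from a {citekey: bibdata} mapping (bibentry) via from_bibentry. A hedged sketch, assuming the API shown in this diff; the bibliographic values are illustrative:

from pubs.paper import Paper

bibdata = {'type': 'article',
           'author': ['Doe, John'],
           'title': 'Nice title',
           'year': '2013'}

p1 = Paper('Doe2013', bibdata)                   # citekey first, then fields
p2 = Paper.from_bibentry({'Doe2013': bibdata})   # whole bibentry, key extracted

assert p1.citekey == p2.citekey == 'Doe2013'
assert p1.bibentry == {'Doe2013': bibdata}       # bibentry is rebuilt on demand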

@ -1,6 +1,7 @@
# display formatting
from . import color
from .bibstruct import TYPE_KEY
# should be adapted to bibtexparser dicts
@ -12,9 +13,9 @@ def person_repr(p):
' '.join(p.lineage(abbr=True))] if s)
def short_authors(bibentry):
def short_authors(bibdata):
try:
authors = [p for p in bibentry['author']]
authors = [p for p in bibdata['author']]
if len(authors) < 3:
return ' and '.join(authors)
else:
@ -23,19 +24,19 @@ def short_authors(bibentry):
return ''
def bib_oneliner(bibentry):
authors = short_authors(bibentry)
def bib_oneliner(bibdata):
authors = short_authors(bibdata)
journal = ''
if 'journal' in bibentry:
journal = ' ' + bibentry['journal']['name']
elif bibentry['type'] == 'inproceedings':
journal = ' ' + bibentry.get('booktitle', '')
if 'journal' in bibdata:
journal = ' ' + bibdata['journal']['name']
elif bibdata[TYPE_KEY] == 'inproceedings':
journal = ' ' + bibdata.get('booktitle', '')
return u'{authors} \"{title}\"{journal}{year}'.format(
authors=color.dye(authors, color.grey, bold=True),
title=bibentry.get('title', ''),
title=bibdata.get('title', ''),
journal=color.dye(journal, color.yellow),
year=' ({})'.format(bibentry['year']) if 'year' in bibentry else '',
year=' ({})'.format(bibdata['year']) if 'year' in bibdata else '',
)
@ -48,11 +49,11 @@ def bib_desc(bib_data):
return s
def paper_oneliner(p, citekey_only = False):
def paper_oneliner(p, citekey_only=False):
if citekey_only:
return p.citekey
else:
bibdesc = bib_oneliner(p.bibentry)
bibdesc = bib_oneliner(p.bibdata)
tags = '' if len(p.tags) == 0 else '| {}'.format(
','.join(color.dye(t, color.tag) for t in sorted(p.tags)))
return u'[{citekey}] {descr} {tags}'.format(

@ -58,9 +58,10 @@ class Repository(object):
def pull_paper(self, citekey):
"""Load a paper by its citekey from disk, if necessary."""
if citekey in self:
return Paper(self.databroker.pull_bibdata(citekey),
citekey=citekey,
metadata=self.databroker.pull_metadata(citekey))
return Paper.from_bibentry(
self.databroker.pull_bibentry(citekey),
citekey=citekey,
metadata=self.databroker.pull_metadata(citekey))
else:
raise InvalidReference('{} citekey not found'.format(citekey))
@ -75,7 +76,7 @@ class Repository(object):
raise CiteKeyCollision('citekey {} already in use'.format(paper.citekey))
if not paper.added:
paper.added = datetime.now()
self.databroker.push_bibdata(paper.citekey, paper.bibdata)
self.databroker.push_bibentry(paper.citekey, paper.bibentry)
self.databroker.push_metadata(paper.citekey, paper.metadata)
self.citekeys.add(paper.citekey)
if event:

@ -57,7 +57,7 @@ and then add `\cite{Loeb_2012}` in your manuscript. After running the bash scrip
Requirements
------------
- python >= 2.6
- python >= 2.7
- [dateutil](http://labix.org/python-dateutil)
- [pyYaml](http://pyyaml.org) (will be deprecated soon)
- [bibtexparser](https://github.com/sciunto/python-bibtexparser) >= 0.5.3

@ -202,6 +202,9 @@ class FakeInput():
input() raises IndexError
"""
class UnexpectedInput(Exception):
pass
def __init__(self, inputs, module_list=tuple()):
self.inputs = list(inputs) or []
self.module_list = module_list
@ -218,9 +221,12 @@ class FakeInput():
self.inputs.append(inp)
def __call__(self, *args, **kwargs):
inp = self.inputs[self._cursor]
self._cursor += 1
return inp
try:
inp = self.inputs[self._cursor]
self._cursor += 1
return inp
except IndexError:
raise self.UnexpectedInput('Unexpected user input in test.')
class TestFakeFs(unittest.TestCase):

@ -23,18 +23,18 @@ doe_bib = """
dummy_metadata = {'docfile': 'docsdir://hop.la', 'tags': set(['a', 'b'])}
franny_bibdata = coder.decode_bibdata(franny_bib)
franny_bibentry = franny_bibdata['Franny1961']
franny_bibentry = coder.decode_bibdata(franny_bib)
franny_bibdata = franny_bibentry['Franny1961']
doe_bibdata = coder.decode_bibdata(doe_bib)
doe_bibentry = doe_bibdata['Doe2013']
doe_bibentry = coder.decode_bibdata(doe_bib)
doe_bibdata = doe_bibentry['Doe2013']
turing_bibdata = coder.decode_bibdata(str_fixtures.turing_bib)
turing_bibentry = turing_bibdata['turing1950computing']
turing_bibentry = coder.decode_bibdata(str_fixtures.turing_bib)
turing_bibdata = turing_bibentry['turing1950computing']
turing_metadata = coder.decode_metadata(str_fixtures.turing_meta)
page_bibdata = coder.decode_bibdata(str_fixtures.bibtex_raw0)
page_bibentry = page_bibdata['Page99']
page_bibentry = coder.decode_bibdata(str_fixtures.bibtex_raw0)
page_bibdata = page_bibentry['Page99']
page_metadata = coder.decode_metadata(str_fixtures.metadata_raw0)
page_metadata = coder.decode_metadata(str_fixtures.metadata_raw0)

@ -16,18 +16,19 @@ class TestGenerateCitekey(unittest.TestCase):
bibstruct.generate_citekey(None)
def test_escapes_chars(self):
doe_bibdata = copy.deepcopy(fixtures.doe_bibdata)
citekey, entry = bibstruct.get_entry(doe_bibdata)
entry['author'] = [u'Zôu\\@/ , John']
key = bibstruct.generate_citekey(doe_bibdata)
doe_bibentry = copy.deepcopy(fixtures.doe_bibentry)
citekey, bibdata = bibstruct.get_entry(doe_bibentry)
bibdata['author'] = [u'Zôu\\@/ , John']
key = bibstruct.generate_citekey(doe_bibentry)
self.assertEqual(key, 'Zou2013')
def test_simple(self):
bibdata = copy.deepcopy(fixtures.doe_bibdata)
key = bibstruct.generate_citekey(bibdata)
bibentry = copy.deepcopy(fixtures.doe_bibentry)
key = bibstruct.generate_citekey(bibentry)
self.assertEqual(key, 'Doe2013')
bibdata = copy.deepcopy(fixtures.franny_bibdata)
key = bibstruct.generate_citekey(bibdata)
bibentry = copy.deepcopy(fixtures.franny_bibentry)
key = bibstruct.generate_citekey(bibentry)
self.assertEqual(key, 'Salinger1961')

@ -17,7 +17,7 @@ class TestDataBroker(unittest.TestCase):
ende = endecoder.EnDecoder()
page99_metadata = ende.decode_metadata(str_fixtures.metadata_raw0)
page99_bibdata = ende.decode_bibdata(str_fixtures.bibtex_raw0)
page99_bibentry = ende.decode_bibdata(str_fixtures.bibtex_raw0)
for db_class in [databroker.DataBroker, datacache.DataCache]:
self.fs = fake_env.create_fake_fs([content, filebroker, configs])
@ -28,22 +28,22 @@ class TestDataBroker(unittest.TestCase):
self.assertFalse(db.exists('citekey1', meta_check=True))
self.assertFalse(db.exists('citekey1', meta_check=False))
db.push_bibdata('citekey1', page99_bibdata)
db.push_bibentry('citekey1', page99_bibentry)
self.assertTrue(db.exists('citekey1', meta_check=False))
self.assertTrue(db.exists('citekey1', meta_check=True))
self.assertEqual(db.pull_metadata('citekey1'), page99_metadata)
pulled = db.pull_bibdata('citekey1')['Page99']
pulled = db.pull_bibentry('citekey1')['Page99']
for key, value in pulled.items():
self.assertEqual(pulled[key], page99_bibdata['Page99'][key])
self.assertEqual(db.pull_bibdata('citekey1'), page99_bibdata)
self.assertEqual(pulled[key], page99_bibentry['Page99'][key])
self.assertEqual(db.pull_bibentry('citekey1'), page99_bibentry)
fake_env.unset_fake_fs([content, filebroker])
def test_existing_data(self):
ende = endecoder.EnDecoder()
page99_bibdata = ende.decode_bibdata(str_fixtures.bibtex_raw0)
page99_bibentry = ende.decode_bibdata(str_fixtures.bibtex_raw0)
for db_class in [databroker.DataBroker, datacache.DataCache]:
self.fs = fake_env.create_fake_fs([content, filebroker])
@ -51,16 +51,16 @@ class TestDataBroker(unittest.TestCase):
db = db_class('repo', create=False)
self.assertEqual(db.pull_bibdata('Page99'), page99_bibdata)
self.assertEqual(db.pull_bibentry('Page99'), page99_bibentry)
for citekey in ['10.1371_journal.pone.0038236',
'10.1371journal.pone.0063400',
'journal0063400']:
db.pull_bibdata(citekey)
db.pull_bibentry(citekey)
db.pull_metadata(citekey)
with self.assertRaises(IOError):
db.pull_bibdata('citekey')
db.pull_bibentry('citekey')
with self.assertRaises(IOError):
db.pull_metadata('citekey')

@ -86,9 +86,7 @@ class TestEnDecode(unittest.TestCase):
biblines = turing_bib.splitlines()
biblines.insert(-3, keyword_str)
bibsrc = '\n'.join(biblines)
print(bibsrc)
entry = decoder.decode_bibdata(bibsrc)['turing1950computing']
print(entry)
self.assertNotIn(u'keywords', entry)
self.assertIn(u'keyword', entry)
self.assertEqual(set(keywords), set(entry[u'keyword']))

@ -75,19 +75,19 @@ class TestEvents(unittest.TestCase):
correct = ['abcdefghijklmnopqrstuvwxyz 12 15',
'Helloword',
'one']
self.assertEquals(_output, correct)
self.assertEqual(_output, correct)
def test_listen_AddEvent(self):
addevent = AddEvent()
addevent.send()
correct = [42]
self.assertEquals(_output, correct)
self.assertEqual(_output, correct)
def test_listen_Info(self):
Info('info').send()
SpecificInfo('info', 'specific').send()
correct = ['info', 'info', 'specific']
self.assertEquals(_output, correct)
self.assertEqual(_output, correct)
if __name__ == '__main__':

@ -1,5 +1,5 @@
# -*- coding: utf-8 -*-
import os
import unittest
import dotdot
@ -9,38 +9,38 @@ from pubs.paper import Paper
class TestAttributes(unittest.TestCase):
def setUp(self):
self.p = Paper.from_bibentry(
fixtures.page_bibentry,
metadata=fixtures.page_metadata).deepcopy()
def test_tags(self):
p = Paper(fixtures.page_bibdata, metadata=fixtures.page_metadata).deepcopy()
self.assertEqual(p.tags, set(['search', 'network']))
self.assertEqual(self.p.tags, set(['search', 'network']))
def test_add_tag(self):
p = Paper(fixtures.page_bibdata, metadata=fixtures.page_metadata).deepcopy()
p.add_tag('algorithm')
self.assertEqual(p.tags, set(['search', 'network', 'algorithm']))
p.add_tag('algorithm')
self.assertEqual(p.tags, set(['search', 'network', 'algorithm']))
self.p.add_tag('algorithm')
self.assertEqual(self.p.tags, set(['search', 'network', 'algorithm']))
self.p.add_tag('algorithm')
self.assertEqual(self.p.tags, set(['search', 'network', 'algorithm']))
def test_set_tags(self):
p = Paper(fixtures.page_bibdata, metadata=fixtures.page_metadata).deepcopy()
p.tags = ['algorithm']
self.assertEqual(p.tags, set(['algorithm']))
self.p.tags = ['algorithm']
self.assertEqual(self.p.tags, set(['algorithm']))
def test_remove_tags(self):
p = Paper(fixtures.page_bibdata, metadata=fixtures.page_metadata).deepcopy()
p.remove_tag('network')
self.assertEqual(p.tags, set(['search']))
self.p.remove_tag('network')
self.assertEqual(self.p.tags, set(['search']))
def test_mixed_tags(self):
p = Paper(fixtures.page_bibdata, metadata=fixtures.page_metadata).deepcopy()
p.add_tag('algorithm')
self.assertEqual(p.tags, set(['search', 'network', 'algorithm']))
p.remove_tag('network')
self.assertEqual(p.tags, set(['search', 'algorithm']))
p.tags = ['ranking']
self.assertEqual(p.tags, set(['ranking']))
p.remove_tag('ranking')
self.assertEqual(p.tags, set())
p.remove_tag('ranking')
self.p.add_tag('algorithm')
self.assertEqual(self.p.tags, set(['search', 'network', 'algorithm']))
self.p.remove_tag('network')
self.assertEqual(self.p.tags, set(['search', 'algorithm']))
self.p.tags = ['ranking']
self.assertEqual(self.p.tags, set(['ranking']))
self.p.remove_tag('ranking')
self.assertEqual(self.p.tags, set())
self.p.remove_tag('ranking')
if __name__ == '__main__':

@ -2,18 +2,19 @@ import unittest
import dotdot
from pubs.commands.list_cmd import (_check_author_match,
_check_field_match,
_check_query_block,
filter_paper,
InvalidQuery)
_check_field_match,
_check_query_block,
filter_paper,
InvalidQuery)
from pubs.paper import Paper
import fixtures
doe_paper = Paper(fixtures.doe_bibdata)
page_paper = Paper(fixtures.page_bibdata)
turing_paper = Paper(fixtures.turing_bibdata, metadata=fixtures.turing_metadata)
doe_paper = Paper.from_bibentry(fixtures.doe_bibentry)
page_paper = Paper.from_bibentry(fixtures.page_bibentry)
turing_paper = Paper.from_bibentry(fixtures.turing_bibentry,
metadata=fixtures.turing_metadata)
class TestAuthorFilter(unittest.TestCase):

@ -15,7 +15,7 @@ class TestRepo(fake_env.TestFakeFs):
def setUp(self):
super(TestRepo, self).setUp()
self.repo = Repository(configs.Config(), create=True)
self.repo.push_paper(Paper(fixtures.turing_bibdata))
self.repo.push_paper(Paper.from_bibentry(fixtures.turing_bibentry))
class TestCitekeyGeneration(TestRepo):
@ -27,9 +27,10 @@ class TestCitekeyGeneration(TestRepo):
self.assertEqual(_base27(26 + i + 1), 'a' + chr(97 + i))
def test_generated_key_is_unique(self):
self.repo.push_paper(Paper(fixtures.doe_bibdata))
self.repo.push_paper(Paper.from_bibentry(fixtures.doe_bibentry))
c = self.repo.unique_citekey('Doe2013')
self.repo.push_paper(Paper(fixtures.doe_bibdata, citekey='Doe2013a'))
self.repo.push_paper(Paper.from_bibentry(fixtures.doe_bibentry,
citekey='Doe2013a'))
c = self.repo.unique_citekey('Doe2013')
self.assertEqual(c, 'Doe2013b')
@ -38,25 +39,27 @@ class TestPushPaper(TestRepo):
def test_raises_value_error_on_existing_key(self):
with self.assertRaises(CiteKeyCollision):
self.repo.push_paper(Paper(fixtures.turing_bibdata))
self.repo.push_paper(Paper.from_bibentry(fixtures.turing_bibentry))
def test_pushes_paper_bibdata(self):
orig = fixtures.doe_bibdata
self.repo.push_paper(Paper(orig))
retrieved = self.repo.databroker.pull_bibdata('Doe2013')
self.assertEquals(orig, retrieved)
orig = fixtures.doe_bibentry
self.repo.push_paper(Paper.from_bibentry(orig))
retrieved = self.repo.databroker.pull_bibentry('Doe2013')
self.assertEqual(orig, retrieved)
def test_pushes_paper_metadata(self):
orig = {'docfile': 'dummy', 'tags': set(['tag', 'another']),
'added': datetime(2012, 12, 12, 12, 12, 12, 12)}
self.repo.push_paper(Paper(fixtures.doe_bibdata, metadata=orig))
self.repo.push_paper(Paper.from_bibentry(fixtures.doe_bibentry,
metadata=orig))
retrieved = self.repo.databroker.pull_metadata('Doe2013')
self.assertEquals(orig, retrieved)
self.assertEqual(orig, retrieved)
def test_pushes_paper_metadata_set_added(self):
orig = {'docfile': 'dummy', 'tags': set(['tag', 'another'])}
now = datetime.now()
self.repo.push_paper(Paper(fixtures.doe_bibdata, metadata=orig))
self.repo.push_paper(Paper.from_bibentry(fixtures.doe_bibentry,
metadata=orig))
retrieved = self.repo.databroker.pull_metadata('Doe2013')
self.assertIn('added', retrieved)
self.assertTrue(now < retrieved['added'])

@ -21,7 +21,7 @@ PRINT_OUTPUT=False
CAPTURE_OUTPUT=True
# code for fake fs
# code for fake fs
class TestFakeInput(unittest.TestCase):
@ -29,7 +29,7 @@ class TestFakeInput(unittest.TestCase):
input = fake_env.FakeInput(['yes', 'no'])
self.assertEqual(input(), 'yes')
self.assertEqual(input(), 'no')
with self.assertRaises(IndexError):
with self.assertRaises(fake_env.FakeInput.UnexpectedInput):
input()
def test_input2(self):
@ -37,7 +37,7 @@ class TestFakeInput(unittest.TestCase):
other_input.as_global()
self.assertEqual(color.input(), 'yes')
self.assertEqual(color.input(), 'no')
with self.assertRaises(IndexError):
with self.assertRaises(fake_env.FakeInput.UnexpectedInput):
color.input()
def test_editor_input(self):
@ -46,7 +46,7 @@ class TestFakeInput(unittest.TestCase):
other_input.as_global()
self.assertEqual(content.editor_input(), 'yes')
self.assertEqual(content.editor_input(), 'no')
with self.assertRaises(IndexError):
with self.assertRaises(fake_env.FakeInput.UnexpectedInput):
color.input()
@ -66,35 +66,41 @@ class CommandTestCase(unittest.TestCase):
In the latter case, the command is:
1. a string representing the command to execute
2. the user inputs to feed to the command during execution
3. the output expected, verified with assertEqual
3. the output expected, verified with assertEqual. Always captures
output in this case.
"""
outs = []
for cmd in cmds:
inputs = []
output = None
actual_cmd = cmd
current_capture_output = capture_output
if not isinstance(cmd, p3.ustr):
if len(cmd) == 2:
input = fake_env.FakeInput(cmd[1], [content, uis, p3])
input.as_global()
if capture_output:
_, stdout, stderr = fake_env.redirect(pubs_cmd.execute)(cmd[0].split())
if len(cmd) == 3 and capture_output:
actual_out = color.undye(stdout)
correct_out = color.undye(cmd[2])
actual_cmd = cmd[0]
if len(cmd) == 2: # Inputs provided
inputs = cmd[1]
if len(cmd) == 3: # Expected output provided
current_capture_output = True
output = cmd[2]
# Always set fake input: tests should not ask for unexpected user input
input = fake_env.FakeInput(inputs, [content, uis, p3])
input.as_global()
try:
if current_capture_output:
_, stdout, stderr = fake_env.redirect(pubs_cmd.execute)(
actual_cmd.split())
self.assertEqual(stderr, '')
actual_out = color.undye(stdout)
if output is not None:
correct_out = color.undye(output)
self.assertEqual(actual_out, correct_out)
outs.append(color.undye(actual_out))
else:
pubs_cmd.execute(cmd.split())
else:
if capture_output:
assert p3.isbasestr(cmd)
_, stdout, stderr = fake_env.redirect(pubs_cmd.execute)(cmd.split())
else:
pubs_cmd.execute(cmd.split())
if capture_output:
assert(stderr == '')
outs.append(color.undye(stdout))
except fake_env.FakeInput.UnexpectedInput:
self.fail('Unexpected input requested by command: {}.'.format(
actual_cmd))
if PRINT_OUTPUT:
print(outs)
return outs
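For reference, the command specifications execute_cmds accepts after this refactor, per the docstring above (values illustrative):

cmds = [
    'pubs init',                               # plain string: just execute it
    ('pubs add /data/pagerank.bib', ['y']),    # (command, user inputs)
    ('pubs list', [], '[Page99] ...\n'),       # (command, inputs, expected output);
]                                              # the 3-tuple always captures output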
@ -161,13 +167,20 @@ class TestAdd(DataCommandTestCase):
def test_add_doc_nocopy_does_not_copy(self):
cmds = ['pubs init',
'pubs add /data/pagerank.bib -C -d /data/pagerank.pdf',
'pubs add /data/pagerank.bib --link -d /data/pagerank.pdf',
]
self.execute_cmds(cmds)
self.assertEqual(self.fs['os'].listdir(
self.fs['os'].path.join(self.default_pubs_dir, 'doc')),
[])
def test_add_move_removes_doc(self):
cmds = ['pubs init',
'pubs add /data/pagerank.bib --move -d /data/pagerank.pdf',
]
self.execute_cmds(cmds)
self.assertFalse(self.fs['os'].path.exists('/data/pagerank.pdf'))
def test_add_twice_fails(self):
cmds = ['pubs init',
'pubs add /data/pagerank.bib',
@ -186,10 +199,8 @@ class TestList(DataCommandTestCase):
'pubs list',
]
outs = self.execute_cmds(cmds)
print(outs[1].splitlines())
self.assertEquals(0, len(outs[1].splitlines()))
print(outs[3].splitlines())
self.assertEquals(1, len(outs[3].splitlines()))
self.assertEqual(0, len(outs[1].splitlines()))
self.assertEqual(1, len(outs[3].splitlines()))
def test_list_several_no_date(self):
self.execute_cmds(['pubs init -p /testrepo'])
@ -203,14 +214,11 @@ class TestList(DataCommandTestCase):
'pubs list',
]
outs = self.execute_cmds(cmds)
print(outs[0].splitlines())
self.assertEquals(4, len(outs[0].splitlines()))
print(outs[2].splitlines())
self.assertEquals(3, len(outs[2].splitlines()))
print(outs[4].splitlines())
self.assertEquals(4, len(outs[4].splitlines()))
self.assertEqual(4, len(outs[0].splitlines()))
self.assertEqual(3, len(outs[2].splitlines()))
self.assertEqual(4, len(outs[4].splitlines()))
# Last added should be last
self.assertEquals('[Page99]', outs[4].splitlines()[-1][:8])
self.assertEqual('[Page99]', outs[4].splitlines()[-1][:8])
def test_list_smart_case(self):
cmds = ['pubs init',
@ -219,8 +227,7 @@ class TestList(DataCommandTestCase):
'pubs list title:language author:Saunders',
]
outs = self.execute_cmds(cmds)
print(outs[-1])
self.assertEquals(1, len(outs[-1].splitlines()))
self.assertEqual(1, len(outs[-1].splitlines()))
def test_list_ignore_case(self):
cmds = ['pubs init',
@ -229,8 +236,7 @@ class TestList(DataCommandTestCase):
'pubs list --ignore-case title:lAnguAge author:saunders',
]
outs = self.execute_cmds(cmds)
print(outs[-1])
self.assertEquals(1, len(outs[-1].splitlines()))
self.assertEqual(1, len(outs[-1].splitlines()))
def test_list_force_case(self):
cmds = ['pubs init',
@ -239,20 +245,19 @@ class TestList(DataCommandTestCase):
'pubs list --force-case title:Language author:saunders',
]
outs = self.execute_cmds(cmds)
self.assertEquals(0 + 1, len(outs[-1].split('\n')))
self.assertEqual(0 + 1, len(outs[-1].split('\n')))
class TestUsecase(DataCommandTestCase):
def test_first(self):
correct = ['Initializing pubs in /paper_first\n',
'',
'[Page99] Page, Lawrence et al. "The PageRank Citation Ranking: Bringing Order to the Web." (1999) \nwas added to pubs.\n',
'[Page99] Page, Lawrence et al. "The PageRank Citation Ranking: Bringing Order to the Web." (1999) \n',
'\n',
'',
'network search\n',
'[Page99] Page, Lawrence et al. "The PageRank Citation Ranking: Bringing Order to the Web." (1999) network search\n'
'[Page99] Page, Lawrence et al. "The PageRank Citation Ranking: Bringing Order to the Web." (1999) | network,search\n',
]
cmds = ['pubs init -p paper_first/',
@ -264,7 +269,7 @@ class TestUsecase(DataCommandTestCase):
'pubs tag search',
]
self.assertEqual(correct, self.execute_cmds(cmds))
self.assertEqual(correct, self.execute_cmds(cmds, capture_output=True))
def test_second(self):
cmds = ['pubs init -p paper_second/',
@ -290,10 +295,8 @@ class TestUsecase(DataCommandTestCase):
]
self.execute_cmds(cmds)
docdir = self.fs['os'].path.expanduser('~/.pubs/doc/')
print(self.fs['os'].listdir(docdir))
self.assertNotIn('turing-mind-1950.pdf', self.fs['os'].listdir(docdir))
def test_tag_list(self):
correct = ['Initializing pubs in /paper_first\n',
'',
@ -364,7 +367,7 @@ class TestUsecase(DataCommandTestCase):
]
outs = self.execute_cmds(cmds)
self.assertEqual(endecoder.EnDecoder().decode_bibdata(outs[2]),
fixtures.page_bibdata)
fixtures.page_bibentry)
def test_import(self):
cmds = ['pubs init',
@ -414,6 +417,27 @@ class TestUsecase(DataCommandTestCase):
outs = self.execute_cmds(cmds)
self.assertEqual(1, len(outs[2].splitlines()))
def test_attach(self):
cmds = ['pubs init',
'pubs add data/pagerank.bib',
'pubs attach Page99 data/pagerank.pdf'
]
self.execute_cmds(cmds)
self.assertTrue(self.fs['os'].path.exists(
self.fs['os'].path.join(self.default_pubs_dir,
'doc',
'Page99.pdf')))
# Also check that the original is not removed
self.assertTrue(self.fs['os'].path.exists('/data/pagerank.pdf'))
def test_attach_with_move(self):
cmds = ['pubs init -p paper_second/',
'pubs add data/pagerank.bib',
'pubs attach --move Page99 data/pagerank.pdf'
]
self.execute_cmds(cmds)
self.assertFalse(self.fs['os'].path.exists('/data/pagerank.pdf'))
if __name__ == '__main__':
unittest.main()
