diff --git a/.ackrc b/.ackrc
index 1617d11..a61f58e 100644
--- a/.ackrc
+++ b/.ackrc
@@ -1 +1,2 @@
--ignore-directory=is:build
+--ignore-directory=is:pubs.egg-info
diff --git a/NOTES b/NOTES
index 76dd465..ab4ed53 100644
--- a/NOTES
+++ b/NOTES
@@ -14,13 +14,12 @@ A paper correspond to 3 files :
About strings:
--------------
-- pybtex seems to store entries as utf-8 (TODO: check)
- so assumption is made that everything is utf-8
- conversions are performed at print time
Config values:
--------------
-[papers]
+[pubs]
open-cmd = open
edit-cmd = edit
import-copy = True
diff --git a/pubs/__init__.py b/pubs/__init__.py
index b081b7c..b350a5d 100644
--- a/pubs/__init__.py
+++ b/pubs/__init__.py
@@ -1 +1 @@
-__version__ = 4
\ No newline at end of file
+__version__ = 4
diff --git a/pubs/bibstruct.py b/pubs/bibstruct.py
index 087923a..1d46345 100644
--- a/pubs/bibstruct.py
+++ b/pubs/bibstruct.py
@@ -21,20 +21,25 @@ def check_citekey(citekey):
raise ValueError("Invalid citekey: %s" % citekey)
def verify_bibdata(bibdata):
- if not hasattr(bibdata, 'entries') or len(bibdata.entries) == 0:
- raise ValueError('no entries in the bibdata.')
- if len(bibdata.entries) > 1:
+ if bibdata is None or len(bibdata) == 0:
+ raise ValueError('no valid bibdata')
+ if len(bibdata) > 1:
raise ValueError('ambiguous: multiple entries in the bibdata.')
def get_entry(bibdata):
verify_bibdata(bibdata)
- return bibdata.entries.iteritems().next()
+ for e in bibdata.items():
+ return e
def extract_citekey(bibdata):
verify_bibdata(bibdata)
citekey, entry = get_entry(bibdata)
return citekey
+def author_last(author_str):
+ """ Return the last name of the author """
+ return author_str.split(',')[0]
+
def generate_citekey(bibdata):
""" Generate a citekey from bib_data.
@@ -44,17 +49,17 @@ def generate_citekey(bibdata):
"""
citekey, entry = get_entry(bibdata)
- author_key = 'author' if 'author' in entry.persons else 'editor'
+ author_key = 'author' if 'author' in entry else 'editor'
try:
- first_author = entry.persons[author_key][0]
+ first_author = entry[author_key][0]
except KeyError:
raise ValueError(
'No author or editor defined: cannot generate a citekey.')
try:
- year = entry.fields['year']
+ year = entry['year']
except KeyError:
year = ''
- citekey = u'{}{}'.format(u''.join(first_author.last()), year)
+ citekey = u'{}{}'.format(u''.join(author_last(first_author)), year)
return str2citekey(citekey)
@@ -67,21 +72,21 @@ def extract_docfile(bibdata, remove=False):
citekey, entry = get_entry(bibdata)
try:
- if 'file' in entry.fields:
- field = entry.fields['file']
+ if 'file' in entry:
+ field = entry['file']
# Check if this is mendeley specific
for f in field.split(':'):
if len(f) > 0:
break
if remove:
- entry.fields.pop('file')
+ entry.pop('file')
# This is a hck for Mendeley. Make clean
if f[0] != '/':
f = '/' + f
return f
- if 'attachments' in entry.fields:
- return entry.fields['attachments']
- if 'pdf' in entry.fields:
- return entry.fields['pdf']
+ if 'attachments' in entry:
+ return entry['attachments']
+ if 'pdf' in entry:
+ return entry['pdf']
except (KeyError, IndexError):
return None
diff --git a/pubs/commands/add_cmd.py b/pubs/commands/add_cmd.py
index 23bb832..6dfc5ce 100644
--- a/pubs/commands/add_cmd.py
+++ b/pubs/commands/add_cmd.py
@@ -108,7 +108,7 @@ def command(args):
if copy_doc is None:
copy_doc = config().import_copy
if copy_doc:
- docfile = rp.databroker.copy_doc(citekey, docfile)
+ docfile = rp.databroker.add_doc(citekey, docfile)
# create the paper
diff --git a/pubs/commands/attach_cmd.py b/pubs/commands/attach_cmd.py
index 6ffed16..dc5e6bf 100644
--- a/pubs/commands/attach_cmd.py
+++ b/pubs/commands/attach_cmd.py
@@ -34,7 +34,7 @@ def command(args):
try:
document = args.document
if copy:
- document = rp.databroker.copy_doc(paper.citekey, document)
+ document = rp.databroker.add_doc(paper.citekey, document)
else:
pass # TODO warn if file does not exists
paper.docpath = document
diff --git a/pubs/commands/edit_cmd.py b/pubs/commands/edit_cmd.py
index 676e545..ae703fd 100644
--- a/pubs/commands/edit_cmd.py
+++ b/pubs/commands/edit_cmd.py
@@ -15,6 +15,24 @@ def parser(subparsers):
return parser
+def edit_meta(citekey):
+ rp = repo.Repository(config())
+ coder = endecoder.EnDecoder()
+ filepath = os.path.join(rp.databroker.databroker.filebroker.metadir(), citekey+'.yaml')
+ with open(filepath) as f:
+ content = f.read()
+
+
+
+def edit_bib(citekey):
+ rp = repo.Repository(config())
+ coder = endecoder.EnDecoder()
+ filepath = os.path.join(rp.databroker.databroker.filebroker.bibdir(), citekey+'.bib')
+ with open(filepath) as f:
+ content = f.read()
+
+
+
def command(args):
ui = get_ui()
@@ -26,8 +44,7 @@ def command(args):
if meta:
filepath = os.path.join(rp.databroker.databroker.filebroker.metadir(), citekey+'.yaml')
else:
- filepath = os.path.join(rp.databroker.databroker.filebroker.bibdir(), citekey+'.bibyaml')
-
+ filepath = os.path.join(rp.databroker.databroker.filebroker.bibdir(), citekey+'.bib')
with open(filepath) as f:
content = f.read()
diff --git a/pubs/commands/export_cmd.py b/pubs/commands/export_cmd.py
index 1a729ea..cab8201 100644
--- a/pubs/commands/export_cmd.py
+++ b/pubs/commands/export_cmd.py
@@ -1,8 +1,6 @@
from __future__ import print_function
import sys
-from pybtex.database import BibliographyData
-
from .. import repo
from ..configs import config
from ..uis import get_ui
@@ -11,8 +9,8 @@ from .. import endecoder
def parser(subparsers):
parser = subparsers.add_parser('export',
help='export bibliography')
- parser.add_argument('-f', '--bib-format', default='bibtex',
- help='export format')
+ # parser.add_argument('-f', '--bib-format', default='bibtex',
+ # help='export format')
parser.add_argument('citekeys', nargs='*',
help='one or several citekeys')
return parser
@@ -20,11 +18,10 @@ def parser(subparsers):
def command(args):
"""
- :param bib_format (in 'bibtex', 'yaml')
"""
+ # :param bib_format (only 'bibtex' now)
ui = get_ui()
- bib_format = args.bib_format
rp = repo.Repository(config())
@@ -36,12 +33,12 @@ def command(args):
if len(papers) == 0:
papers = rp.all_papers()
- bib = BibliographyData()
+ bib = {}
for p in papers:
- bib.add_entry(p.citekey, p.bibentry)
+ bib[p.citekey] = p.bibentry
try:
exporter = endecoder.EnDecoder()
- bibdata_raw = exporter.encode_bibdata(bib, fmt=bib_format)
+ bibdata_raw = exporter.encode_bibdata(bib)
print(bibdata_raw, end='')
except KeyError:
ui.error("Invalid output format: %s." % bib_format)
diff --git a/pubs/commands/import_cmd.py b/pubs/commands/import_cmd.py
index 717e586..fbd6939 100644
--- a/pubs/commands/import_cmd.py
+++ b/pubs/commands/import_cmd.py
@@ -1,8 +1,6 @@
import os
import datetime
-from pybtex.database import Entry, BibliographyData, FieldDict, Person
-
from .. import repo
from .. import endecoder
from .. import bibstruct
@@ -41,8 +39,9 @@ def many_from_path(bibpath):
bibpath = os.path.expanduser(bibpath)
if os.path.isdir(bibpath):
+ print([os.path.splitext(f)[-1][1:] for f in os.listdir(bibpath)])
all_files = [os.path.join(bibpath, f) for f in os.listdir(bibpath)
- if os.path.splitext(f)[-1][1:] in list(coder.decode_fmt.keys())]
+ if os.path.splitext(f)[-1][1:] == 'bib']
else:
all_files = [bibpath]
@@ -53,10 +52,10 @@ def many_from_path(bibpath):
papers = {}
for b in biblist:
- for k in b.entries:
+ for k in b.keys():
try:
- bibdata = BibliographyData()
- bibdata.entries[k] = b.entries[k]
+ bibdata = {}
+ bibdata[k] = b[k]
papers[k] = Paper(bibdata, citekey=k)
papers[k].added = datetime.datetime.now()
@@ -94,7 +93,7 @@ def command(args):
if copy_doc is None:
copy_doc = config().import_copy
if copy_doc:
- docfile = rp.databroker.copy_doc(p.citekey, docfile)
+ docfile = rp.databroker.add_doc(p.citekey, docfile)
p.docpath = docfile
rp.push_paper(p)
diff --git a/pubs/commands/list_cmd.py b/pubs/commands/list_cmd.py
index 228cf31..2acdec6 100644
--- a/pubs/commands/list_cmd.py
+++ b/pubs/commands/list_cmd.py
@@ -1,9 +1,9 @@
from .. import repo
from .. import pretty
+from .. import bibstruct
from ..configs import config
from ..uis import get_ui
-
class InvalidQuery(ValueError):
pass
@@ -56,20 +56,15 @@ def _get_field_value(query_block):
return (field, value)
-def _lower(string, lower=True):
- if lower:
- return string.lower()
- else:
- return string
-
+def _lower(s, lower=True):
+ return s.lower() if lower else s
def _check_author_match(paper, query, case_sensitive=False):
"""Only checks within last names."""
- if not 'author' in paper.bibentry.persons:
+ if not 'author' in paper.bibentry:
return False
- return any([query in _lower(name, lower=(not case_sensitive))
- for p in paper.bibentry.persons['author']
- for name in p.last()])
+ return any([query == _lower(bibstruct.author_last(p), lower=(not case_sensitive))
+ for p in paper.bibentry['author']])
def _check_tag_match(paper, query, case_sensitive=False):
@@ -78,7 +73,7 @@ def _check_tag_match(paper, query, case_sensitive=False):
def _check_field_match(paper, field, query, case_sensitive=False):
- return query in _lower(paper.bibentry.fields[field],
+ return query in _lower(paper.bibentry[field],
lower=(not case_sensitive))
@@ -92,7 +87,7 @@ def _check_query_block(paper, query_block, case_sensitive=None):
return _check_tag_match(paper, value, case_sensitive=case_sensitive)
elif field == 'author':
return _check_author_match(paper, value, case_sensitive=case_sensitive)
- elif field in paper.bibentry.fields:
+ elif field in paper.bibentry:
return _check_field_match(paper, field, value,
case_sensitive=case_sensitive)
else:
diff --git a/pubs/datacache.py b/pubs/datacache.py
index 8dbec1a..b4fe98a 100644
--- a/pubs/datacache.py
+++ b/pubs/datacache.py
@@ -4,7 +4,7 @@ from . import databroker
class DataCache(object):
""" DataCache class, provides a very similar interface as DataBroker
- Has two roles :
+ Has two roles :
1. Provides a buffer between the commands and the hard drive.
Until a command request a hard drive ressource, it does not touch it.
2. Keeps a up-to-date, pickled version of the repository, to speed up things
@@ -12,7 +12,7 @@ class DataCache(object):
Changes are detected using data modification timestamps.
For the moment, only (1) is implemented.
- """
+ """
def __init__(self, directory, create=False):
self.directory = directory
self._databroker = None
@@ -30,16 +30,16 @@ class DataCache(object):
def pull_metadata(self, citekey):
return self.databroker.pull_metadata(citekey)
-
+
def pull_bibdata(self, citekey):
return self.databroker.pull_bibdata(citekey)
-
+
def push_metadata(self, citekey, metadata):
self.databroker.push_metadata(citekey, metadata)
-
+
def push_bibdata(self, citekey, bibdata):
self.databroker.push_bibdata(citekey, bibdata)
-
+
def push(self, citekey, metadata, bibdata):
self.databroker.push(citekey, metadata, bibdata)
@@ -59,23 +59,23 @@ class DataCache(object):
def verify(self, bibdata_raw):
"""Will return None if bibdata_raw can't be decoded"""
return self.databroker.verify(bibdata_raw)
-
+
# docbroker
def in_docsdir(self, docpath):
return self.databroker.in_docsdir(docpath)
def real_docpath(self, docpath):
- return self.databroker.real_docpath(docpath)
+ return self.databroker.real_docpath(docpath)
- def copy_doc(self, citekey, source_path, overwrite=False):
+ def add_doc(self, citekey, source_path, overwrite=False):
return self.databroker.add_doc(citekey, source_path, overwrite=overwrite)
def remove_doc(self, docpath, silent=True):
return self.databroker.remove_doc(docpath, silent=silent)
def rename_doc(self, docpath, new_citekey):
- return self.databroker.rename_doc(docpath, new_citekey)
+ return self.databroker.rename_doc(docpath, new_citekey)
# notesbroker
@@ -94,7 +94,7 @@ class DataCache(object):
# def __init__(self, cache, directory):
# self.cache = cache
# self.directory = directory
-
+
# def changes(self):
# """ Returns the list of modified files since the last cache was saved to disk"""
# pass
diff --git a/pubs/endecoder.py b/pubs/endecoder.py
index 894784f..6abcdaf 100644
--- a/pubs/endecoder.py
+++ b/pubs/endecoder.py
@@ -1,5 +1,5 @@
-import color
-import yaml
+from __future__ import print_function, absolute_import, division, unicode_literals
+import copy
try:
import cStringIO as StringIO
@@ -7,19 +7,44 @@ except ImportError:
import StringIO
try:
- import pybtex.database.input.bibtex
- import pybtex.database.input.bibtexml
- import pybtex.database.input.bibyaml
- import pybtex.database.output.bibtex
- import pybtex.database.output.bibtexml
- import pybtex.database.output.bibyaml
-
+ import bibtexparser as bp
except ImportError:
print(color.dye('error', color.error) +
- ": you need to install Pybtex; try running 'pip install "
- "pybtex' or 'easy_install pybtex'")
+ ": you need to install bibtexparser; try running 'pip install "
+ "bibtexparser'.")
exit(-1)
+import yaml
+
+from . import color
+
+
+def sanitize_citekey(record):
+ record['id'] = record['id'].strip('\n')
+ return record
+
+def customizations(record):
+ """ Use some functions delivered by the library
+
+ :param record: a record
+ :returns: -- customized record
+ """
+ record = bp.customization.convert_to_unicode(record)
+ record = bp.customization.type(record)
+ record = bp.customization.author(record)
+ record = bp.customization.editor(record)
+ record = bp.customization.journal(record)
+ record = bp.customization.keyword(record)
+ record = bp.customization.link(record)
+ record = bp.customization.page_double_hyphen(record)
+ record = bp.customization.doi(record)
+
+ record = sanitize_citekey(record)
+
+ return record
+
+bibfield_order = ['author', 'title', 'journal', 'institution', 'publisher', 'year', 'month', 'number', 'pages', 'link', 'doi', 'id', 'note', 'abstract']
+
class EnDecoder(object):
""" Encode and decode content.
@@ -32,45 +57,55 @@ class EnDecoder(object):
* encode_bibdata will try to recognize exceptions
"""
- decode_fmt = {'bibtex' : pybtex.database.input.bibtex,
- 'bibyaml' : pybtex.database.input.bibyaml,
- 'bib' : pybtex.database.input.bibtex,
- 'bibtexml': pybtex.database.input.bibtexml}
-
- encode_fmt = {'bibtex' : pybtex.database.output.bibtex,
- 'bibyaml' : pybtex.database.output.bibyaml,
- 'bib' : pybtex.database.output.bibtex,
- 'bibtexml': pybtex.database.output.bibtexml}
-
def encode_metadata(self, metadata):
return yaml.safe_dump(metadata, allow_unicode=True, encoding='UTF-8', indent = 4)
-
+
def decode_metadata(self, metadata_raw):
return yaml.safe_load(metadata_raw)
-
- def encode_bibdata(self, bibdata, fmt='bib'):
+
+ def encode_bibdata(self, bibdata):
"""Encode bibdata """
- s = StringIO.StringIO()
- EnDecoder.encode_fmt[fmt].Writer().write_stream(bibdata, s)
- return s.getvalue()
+ return '\n'.join(self._encode_bibentry(citekey, entry)
+ for citekey, entry in bibdata.items())
+
+ @staticmethod
+ def _encode_field(key, value):
+ if key == 'link':
+ return ', '.join(link['url'] for link in value)
+ elif key == 'author':
+ return ' and '.join(author for author in value)
+ elif key == 'journal':
+ return value['name']
+ else:
+ return value
+
+ @staticmethod
+ def _encode_bibentry(citekey, bibentry):
+ bibraw = '@{}{{{},\n'.format(bibentry['type'], citekey)
+ bibentry = copy.copy(bibentry)
+ for key in bibfield_order:
+ if key in bibentry:
+ value = bibentry.pop(key)
+ bibraw += ' {} = {{{}}},\n'.format(key, EnDecoder._encode_field(key, value))
+ for key, value in bibentry.items():
+ if key != 'type':
+ bibraw += ' {} = {{{}}},\n'.format(key, EnDecoder._encode_field(key, value))
+ bibraw += '}\n'
+ return bibraw
def decode_bibdata(self, bibdata_raw):
""""""
bibdata_rawutf8 = bibdata_raw
-# bibdata_rawutf8 = unicode(bibdata_raw, 'utf8') # FIXME this doesn't work
- for fmt in EnDecoder.decode_fmt.values():
- try:
- bibdata_stream = StringIO.StringIO(bibdata_rawutf8)
- return self._decode_bibdata(bibdata_stream, fmt.Parser())
- except ValueError:
- pass
- raise ValueError('could not parse bibdata')
+ #bibdata_rawutf8 = unicode(bibdata_raw, 'utf8') # FIXME this doesn't work
+ bibdata_stream = StringIO.StringIO(bibdata_rawutf8)
+ return self._decode_bibdata(bibdata_stream)
- def _decode_bibdata(self, bibdata_stream, parser):
+ def _decode_bibdata(self, bibdata_stream):
try:
- entry = parser.parse_stream(bibdata_stream)
- if len(entry.entries) > 0:
- return entry
+ entries = bp.bparser.BibTexParser(bibdata_stream, customization=customizations).get_entry_dict()
+ if len(entries) > 0:
+ return entries
except Exception:
- pass
+ import traceback
+ traceback.print_exc()
raise ValueError('could not parse bibdata')
diff --git a/pubs/filebroker.py b/pubs/filebroker.py
index 37b675d..c6b1b0d 100644
--- a/pubs/filebroker.py
+++ b/pubs/filebroker.py
@@ -22,7 +22,7 @@ class FileBroker(object):
"""
def __init__(self, directory, create=False):
- self.directory = directory
+ self.directory = directory
self.metadir = os.path.join(self.directory, 'meta')
self.bibdir = os.path.join(self.directory, 'bib')
if create:
@@ -30,7 +30,7 @@ class FileBroker(object):
check_directory(self.directory)
check_directory(self.metadir)
check_directory(self.bibdir)
-
+
def _create(self):
if not check_directory(self.directory, fail = False):
os.mkdir(self.directory)
@@ -38,7 +38,7 @@ class FileBroker(object):
os.mkdir(self.metadir)
if not check_directory(self.bibdir, fail = False):
os.mkdir(self.bibdir)
-
+
def pull_metafile(self, citekey):
filepath = os.path.join(self.metadir, citekey + '.yaml')
return read_file(filepath)
@@ -46,17 +46,17 @@ class FileBroker(object):
def pull_bibfile(self, citekey):
filepath = os.path.join(self.bibdir, citekey + '.bib')
return read_file(filepath)
-
+
def push_metafile(self, citekey, metadata):
"""Put content to disk. Will gladly override anything standing in its way."""
filepath = os.path.join(self.metadir, citekey + '.yaml')
write_file(filepath, metadata)
-
+
def push_bibfile(self, citekey, bibdata):
"""Put content to disk. Will gladly override anything standing in its way."""
filepath = os.path.join(self.bibdir, citekey + '.bib')
write_file(filepath, bibdata)
-
+
def push(self, citekey, metadata, bibdata):
"""Put content to disk. Will gladly override anything standing in its way."""
self.push_metafile(citekey, metadata)
@@ -72,10 +72,10 @@ class FileBroker(object):
def exists(self, citekey, both=True):
if both:
- return (check_file(os.path.join(self.metadir, citekey + '.yaml'), fail=False) and
+ return (check_file(os.path.join(self.metadir, citekey + '.yaml'), fail=False) and
check_file(os.path.join(self.bibdir, citekey + '.bib'), fail=False))
else:
- return (check_file(os.path.join(self.metadir, citekey + '.yaml'), fail=False) or
+ return (check_file(os.path.join(self.metadir, citekey + '.yaml'), fail=False) or
check_file(os.path.join(self.bibdir, citekey + '.bib'), fail=False))
@@ -131,9 +131,9 @@ class DocBroker(object):
# return check_file(os.path.join(self.docdir, citekey + ext), fail=False)
def real_docpath(self, docpath):
- """Return the full path
+ """ Return the full path
Essentially transform pubsdir://doc/{citekey}.{ext} to /path/to/pubsdir/doc/{citekey}.{ext}.
- Return absoluted paths of regular ones otherwise.
+ Return absolute paths of regular ones otherwise.
"""
if self.in_docsdir(docpath):
parsed = urlparse.urlparse(docpath)
@@ -160,7 +160,7 @@ class DocBroker(object):
full_target_path = self.real_docpath(target_path)
if not overwrite and check_file(full_target_path, fail=False):
raise IOError('{} file exists.'.format(full_target_path))
-
+
doc_content = get_content(full_source_path)
with open(full_target_path, 'wb') as f:
f.write(doc_content)
@@ -169,7 +169,7 @@ class DocBroker(object):
def remove_doc(self, docpath, silent=True):
""" Will remove only file hosted in docsdir://
-
+
:raise ValueError: for other paths, unless :param silent: is True
"""
if not self.in_docsdir(docpath):
@@ -196,4 +196,4 @@ class DocBroker(object):
new_docpath = self.add_doc(new_citekey, docpath)
self.remove_doc(docpath)
- return new_docpath
\ No newline at end of file
+ return new_docpath
diff --git a/pubs/paper.py b/pubs/paper.py
index e074356..1b0a8ba 100644
--- a/pubs/paper.py
+++ b/pubs/paper.py
@@ -11,7 +11,7 @@ class Paper(object):
""" Paper class.
The object is not responsible of any disk I/O.
- self.bibdata is a pybtex.database.BibliographyData object
+ self.bibdata is a dictionary of bibliographic fields
self.metadata is a dictionary
The paper class provides methods to access the fields for its metadata
@@ -43,10 +43,18 @@ class Paper(object):
return 'Paper(%s, %s, %s)' % (
self.citekey, self.bibentry, self.metadata)
- def deepcopy(self):
+ def __deepcopy__(self, memo):
+ return Paper(citekey =self.citekey,
+ metadata=copy.deepcopy(self.metadata, memo),
+ bibdata=copy.deepcopy(self.bibdata, memo))
+
+ def __copy__(self):
return Paper(citekey =self.citekey,
- metadata=copy.deepcopy(self.metadata),
- bibdata=copy.deepcopy(self.bibdata))
+ metadata=self.metadata,
+ bibdata=self.bibdata)
+
+ def deepcopy(self):
+ return self.__deepcopy__({})
# docpath
diff --git a/pubs/pretty.py b/pubs/pretty.py
index 8a107dd..e8fd3ac 100644
--- a/pubs/pretty.py
+++ b/pubs/pretty.py
@@ -1,23 +1,19 @@
# display formatting
from . import color
-from pybtex.bibtex.utils import bibtex_purify
-# A bug in pybtex makes the abbreviation wrong here
-# (Submitted with racker ID: ID: 3605659)
-# The purification should also be applied to names but unfortunately
-# it removes dots which is annoying on abbreviations.
+# should be adapted to bibtexparser dicts
def person_repr(p):
+ raise NotImplementedError
return ' '.join(s for s in [
' '.join(p.first(abbr=True)),
' '.join(p.last(abbr=False)),
' '.join(p.lineage(abbr=True))] if s)
-
def short_authors(bibentry):
try:
- authors = [person_repr(p) for p in bibentry.persons['author']]
+ authors = [p for p in bibentry['author']]
if len(authors) < 3:
return ', '.join(authors)
else:
@@ -28,27 +24,26 @@ def short_authors(bibentry):
def bib_oneliner(bibentry):
authors = short_authors(bibentry)
- title = bibtex_purify(bibentry.fields['title'])
- year = bibtex_purify(bibentry.fields.get('year', ''))
- journal = ''
- field = 'journal'
- if bibentry.type == 'inproceedings':
- field = 'booktitle'
- journal = bibtex_purify(bibentry.fields.get(field, ''))
+ journal, journal_field = '', 'journal'
+ if 'journal' in bibentry:
+ journal = bibentry['journal']['name']
+ elif bibentry['type'] == 'inproceedings':
+ journal = bibentry.get('booktitle', '')
+
return u'{authors} \"{title}\" {journal} ({year})'.format(
authors=color.dye(authors, color.cyan),
- title=title,
+ title=bibentry['title'],
journal=color.dye(journal, color.yellow),
- year=year,
+ year=bibentry['year'],
)
def bib_desc(bib_data):
- article = bib_data.entries[list(bib_data.entries.keys())[0]]
- s = '\n'.join('author: {}'.format(person_repr(p))
- for p in article.persons['author'])
+ article = bib_data[list(bib_data.keys())[0]]
+ s = '\n'.join('author: {}'.format(p)
+ for p in article['author'])
s += '\n'
- s += '\n'.join('{}: {}'.format(k, v) for k, v in article.fields.items())
+ s += '\n'.join('{}: {}'.format(k, v) for k, v in article.items())
return s
@@ -62,4 +57,4 @@ def paper_oneliner(p, n = 0, citekey_only = False):
descr=bibdesc,
tags=color.dye(' '.join(p.tags),
color.tag, bold=False),
- )).encode('utf-8')
\ No newline at end of file
+ )).encode('utf-8')
diff --git a/pubs/repo.py b/pubs/repo.py
index df474ba..46630b0 100644
--- a/pubs/repo.py
+++ b/pubs/repo.py
@@ -2,10 +2,8 @@ import shutil
import glob
import itertools
-from pybtex.database import BibliographyData
-
from . import bibstruct
-from . import events
+from . import events
from . import datacache
from .paper import Paper
@@ -70,7 +68,7 @@ class Repository(object):
raise IOError('files using the {} citekey already exists'.format(paper.citekey))
if (not overwrite) and self.citekeys is not None and paper.citekey in self.citekeys:
raise CiteKeyCollision('citekey {} already in use'.format(paper.citekey))
-
+
self.databroker.push_bibdata(paper.citekey, paper.bibdata)
self.databroker.push_metadata(paper.citekey, paper.metadata)
self.citekeys.add(paper.citekey)
@@ -79,7 +77,7 @@ class Repository(object):
def remove_paper(self, citekey, remove_doc=True, event=True):
""" Remove a paper. Is silent if nothing needs to be done."""
-
+
if event:
events.RemoveEvent(citekey).send()
if remove_doc:
@@ -89,7 +87,7 @@ class Repository(object):
self.databroker.remove_doc(docpath, silent=True)
self.databroker.remove_note(citekey, silent=True)
except IOError:
- pass # FXME: if IOError is about being unable to
+ pass # FIXME: if IOError is about being unable to
# remove the file, we need to issue an error.I
self.citekeys.remove(citekey)
@@ -103,11 +101,10 @@ class Repository(object):
else:
# check if new_citekey does not exists
if self.databroker.exists(new_citekey, both=False):
- raise IOError("can't rename paper to {}, conflicting files exists".format(new_citekey))
+ raise IOError("can't rename paper to {}, conflicting files exists".format(new_citekey))
- # modify bibdata (__delitem__ not implementd by pybtex)
- new_bibdata = BibliographyData()
- new_bibdata.entries[new_citekey] = paper.bibdata.entries[old_citekey]
+ new_bibdata = {}
+ new_bibdata[new_citekey] = paper.bibdata[old_citekey]
paper.bibdata = new_bibdata
# move doc file if necessary
diff --git a/README.md b/readme.md
similarity index 61%
rename from README.md
rename to readme.md
index cdc8c9b..50e9ca7 100644
--- a/README.md
+++ b/readme.md
@@ -1,11 +1,10 @@
-Papers
-======
+# Pubs
-Papers brings your bibliography to the command line.
+Pubs brings your bibliography to the command line.
-Papers organizes your bibliographic documents together with the bibliographic data associated to them and provides command line access to basic and advanced manipulation of your library.
+Pubs organizes your bibliographic documents together with the bibliographic data associated to them and provides command line access to basic and advanced manipulation of your library.
-Papers is built with the following principles in mind:
+Pubs is built with the following principles in mind:
- all papers are referenced using unique citation keys,
- bibliographic data (i.e. pure bibtex information) is kept separated from metadata (including links to pdf or tags),
@@ -19,14 +18,14 @@ Getting started
---------------
Create your library (by default, goes to '~/.papers/').
- papers init
+ pubs init
Import existing data from bibtex (papers will try to automatically copy documents defined as 'file' in bibtex):
- papers import path/to/collection.bib
+ pubs import path/to/collection.bib
or for bibtex containing a single file:
- papers add --bibfile article.bib --docfile article.pdf
+ pubs add --bibfile article.bib --docfile article.pdf
Authors
diff --git a/setup.py b/setup.py
index 3a9a9fe..0fe96ef 100644
--- a/setup.py
+++ b/setup.py
@@ -8,7 +8,7 @@ setup(name='pubs',
author_email='fabien.benureau+inria@gmail.com',
url='',
description='research papers manager',
- requires=['pybtex'],
+ requires=['bibtexparser'],
packages=find_packages(),
package_data={'': ['*.tex', '*.sty']},
scripts=['pubs/pubs']
diff --git a/tests/zoo/incollections.bib b/tests/bibexamples/incollections.bib
similarity index 100%
rename from tests/zoo/incollections.bib
rename to tests/bibexamples/incollections.bib
diff --git a/tests/testenv.py b/tests/dotdot.py
similarity index 100%
rename from tests/testenv.py
rename to tests/dotdot.py
diff --git a/tests/fake_env.py b/tests/fake_env.py
index 09c49e3..3f69ffc 100644
--- a/tests/fake_env.py
+++ b/tests/fake_env.py
@@ -6,7 +6,7 @@ import unittest
import pkgutil
import re
-import testenv
+import dotdot
import fake_filesystem
import fake_filesystem_shutil
import fake_filesystem_glob
diff --git a/tests/fixtures.py b/tests/fixtures.py
index 2644ff1..7ec3571 100644
--- a/tests/fixtures.py
+++ b/tests/fixtures.py
@@ -1,8 +1,6 @@
# -*- coding: utf-8 -*-
-from pybtex.database import Person
-
-import testenv
+import dotdot
from pubs import endecoder
import str_fixtures
@@ -21,8 +19,18 @@ doe_bib = """
year = "2013"}
"""
-franny_bibdata = coder.decode_bibdata(franny_bib)
-doe_bibdata = coder.decode_bibdata(doe_bib)
-turing_bibdata = coder.decode_bibdata(str_fixtures.turing_bib)
-page_bibdata = coder.decode_bibdata(str_fixtures.bibtex_raw0)
-page_metadata = coder.decode_metadata(str_fixtures.metadata_raw0)
\ No newline at end of file
+franny_bibdata = coder.decode_bibdata(franny_bib)
+franny_bibentry = franny_bibdata['Franny1961']
+
+doe_bibdata = coder.decode_bibdata(doe_bib)
+doe_bibentry = doe_bibdata['Doe2013']
+
+turing_bibdata = coder.decode_bibdata(str_fixtures.turing_bib)
+turing_bibentry = turing_bibdata['turing1950computing']
+turing_metadata = coder.decode_metadata(str_fixtures.turing_meta)
+
+page_bibdata = coder.decode_bibdata(str_fixtures.bibtex_raw0)
+page_bibentry = page_bibdata['Page99']
+page_metadata = coder.decode_metadata(str_fixtures.metadata_raw0)
+
+page_metadata = coder.decode_metadata(str_fixtures.metadata_raw0)
diff --git a/tests/str_fixtures.py b/tests/str_fixtures.py
index 4026465..7274e51 100644
--- a/tests/str_fixtures.py
+++ b/tests/str_fixtures.py
@@ -1,71 +1,3 @@
-bibyaml_raw0 = """entries:
- Page99:
- abstract: The importance of a Web page is an inherently subjective matter,
- which depends on the readers interests, knowledge and attitudes. But there
- is still much that can be said objectively about the relative importance
- of Web pages. This paper describes PageRank, a mathod for rating Web pages
- objectively and mechanically, effectively measuring the human interest
- and attention devoted to them. We compare PageRank to an idealized random
- Web surfer. We show how to efficiently compute PageRank for large numbers
- of pages. And, we show how to apply PageRank to search and to user navigation.
- author:
- - first: Lawrence
- last: Page
- - first: Sergey
- last: Brin
- - first: Rajeev
- last: Motwani
- - first: Terry
- last: Winograd
- institution: Stanford InfoLab
- month: November
- note: Previous number = SIDL-WP-1999-0120
- number: 1999-66
- publisher: Stanford InfoLab
- title: 'The PageRank Citation Ranking: Bringing Order to the Web.'
- type: techreport
- url: http://ilpubs.stanford.edu:8090/422/
- year: '1999'
-"""
-
-bibtexml_raw0 = """
-
The advent of humanoid robots has enabled a new approach to investigating the acquisition of language, and we report on the development of robots able to acquire rudimentary linguistic skills. Our work focuses on early stages analogous to some characteristics of a human child of about 6 to 14 months, the transition from babbling to first word forms. We investigate one mechanism among many that may contribute to this process, a key factor being the sensitivity of learners to the statistical distribution of linguistic elements. As well as being necessary for learning word meanings, the acquisition of anchor word forms facilitates the segmentation of an acoustic stream through other mechanisms. In our experiments some salient one-syllable word forms are learnt by a humanoid robot in real-time interactions with naive participants. Words emerge from random syllabic babble through a learning process based on a dialogue between the robot and the human participant, whose speech is perceived by the robot as a stream of phonemes. Numerous ways of representing the speech as syllabic segments are possible. Furthermore, the pronunciation of many words in spontaneous speech is variable. However, in line with research elsewhere, we observe that salient content words are more likely than function words to have consistent canonical representations; thus their relative frequency increases, as does their influence on the learner. Variable pronunciation may contribute to early word form acquisition. The importance of contingent interaction in real-time between teacher and learner is reflected by a reinforcement process, with variable success. The examination of individual cases may be more informative than group results. Nevertheless, word forms are usually produced by the robot after a few minutes of dialogue, employing a simple, real-time, frequency dependent mechanism. 
This work shows the potential of human-robot interaction systems in studies of the dynamics of early language acquisition.
}, + number = {6}, + doi = {10.1371/journal.pone.0038236} +} diff --git a/tests/testrepo/bib/10.1371_journal.pone.0038236.bibyaml b/tests/testrepo/bib/10.1371_journal.pone.0038236.bibyaml deleted file mode 100644 index 26da434..0000000 --- a/tests/testrepo/bib/10.1371_journal.pone.0038236.bibyaml +++ /dev/null @@ -1,45 +0,0 @@ -entries: - 10.1371_journal.pone.0038236: - abstract:The advent of humanoid robots has enabled a new approach to investigating - the acquisition of language, and we report on the development of robots - able to acquire rudimentary linguistic skills. Our work focuses on early - stages analogous to some characteristics of a human child of about 6 to - 14 months, the transition from babbling to first word forms. We investigate - one mechanism among many that may contribute to this process, a key factor - being the sensitivity of learners to the statistical distribution of linguistic - elements. As well as being necessary for learning word meanings, the acquisition - of anchor word forms facilitates the segmentation of an acoustic stream - through other mechanisms. In our experiments some salient one-syllable - word forms are learnt by a humanoid robot in real-time interactions with - naive participants. Words emerge from random syllabic babble through a - learning process based on a dialogue between the robot and the human participant, - whose speech is perceived by the robot as a stream of phonemes. Numerous - ways of representing the speech as syllabic segments are possible. Furthermore, - the pronunciation of many words in spontaneous speech is variable. However, - in line with research elsewhere, we observe that salient content words - are more likely than function words to have consistent canonical representations; - thus their relative frequency increases, as does their influence on the - learner. Variable pronunciation may contribute to early word form acquisition. 
- The importance of contingent interaction in real-time between teacher - and learner is reflected by a reinforcement process, with variable success. - The examination of individual cases may be more informative than group - results. Nevertheless, word forms are usually produced by the robot after - a few minutes of dialogue, employing a simple, real-time, frequency dependent - mechanism. This work shows the potential of human-robot interaction systems - in studies of the dynamics of early language acquisition.
- author: - - first: Caroline - last: Saunders - middle: Lyon AND Chrystopher L. Nehaniv AND Joe - doi: 10.1371/journal.pone.0038236 - journal: PLoS ONE - month: '06' - number: '6' - pages: e38236 - publisher: Public Library of Science - title: 'Interactive Language Learning by Robots: The Transition from Babbling - to Word Forms' - type: article - url: http://dx.doi.org/10.1371%2Fjournal.pone.0038236 - volume: '7' - year: '2012' diff --git a/tests/testrepo/bib/10.1371journal.pone.0063400.bib b/tests/testrepo/bib/10.1371journal.pone.0063400.bib new file mode 100644 index 0000000..4bc2500 --- /dev/null +++ b/tests/testrepo/bib/10.1371journal.pone.0063400.bib @@ -0,0 +1,15 @@ + +@article{10.1371/journal.pone.0063400, + author = {Martius, , Georg AND Der, , Ralf AND Ay, , Nihat}, + journal = {PLoS ONE}, + publisher = {Public Library of Science}, + title = {Information Driven Self-Organization of Complex Robotic Behaviors}, + year = {2013}, + month = {05}, + volume = {8}, + url = {http://dx.doi.org/10.1371%2Fjournal.pone.0063400}, + pages = {e63400}, + abstract = {Information theory is a powerful tool to express principles to drive autonomous systems because it is domain invariant and allows for an intuitive interpretation. This paper studies the use of the predictive information (PI), also called excess entropy or effective measure complexity, of the sensorimotor process as a driving force to generate behavior. We study nonlinear and nonstationary systems and introduce the time-local predicting information (TiPI) which allows us to derive exact results together with explicit update rules for the parameters of the controller in the dynamical systems framework. In this way the information principle, formulated at the level of behavior, is translated to the dynamics of the synapses. We underpin our results with a number of case studies with high-dimensional robotic systems. We show the spontaneous cooperativity in a complex physical system with decentralized control. 
Moreover, a jointly controlled humanoid robot develops a high behavioral variety depending on its physics and the environment it is dynamically embedded into. The behavior can be decomposed into a succession of low-dimensional modes that increasingly explore the behavior space. This is a promising way to avoid the curse of dimensionality which hinders learning systems to scale well.
}, + number = {5}, + doi = {10.1371/journal.pone.0063400} +} diff --git a/tests/testrepo/bib/10.1371journal.pone.0063400.bibyaml b/tests/testrepo/bib/10.1371journal.pone.0063400.bibyaml deleted file mode 100644 index bdfda50..0000000 --- a/tests/testrepo/bib/10.1371journal.pone.0063400.bibyaml +++ /dev/null @@ -1,36 +0,0 @@ -entries: - 10.1371journal.pone.0063400: - abstract:Information theory is a powerful tool to express principles to - drive autonomous systems because it is domain invariant and allows for - an intuitive interpretation. This paper studies the use of the predictive - information (PI), also called excess entropy or effective measure complexity, - of the sensorimotor process as a driving force to generate behavior. We - study nonlinear and nonstationary systems and introduce the time-local - predicting information (TiPI) which allows us to derive exact results - together with explicit update rules for the parameters of the controller - in the dynamical systems framework. In this way the information principle, - formulated at the level of behavior, is translated to the dynamics of - the synapses. We underpin our results with a number of case studies with - high-dimensional robotic systems. We show the spontaneous cooperativity - in a complex physical system with decentralized control. Moreover, a jointly - controlled humanoid robot develops a high behavioral variety depending - on its physics and the environment it is dynamically embedded into. The - behavior can be decomposed into a succession of low-dimensional modes - that increasingly explore the behavior space. This is a promising way - to avoid the curse of dimensionality which hinders learning systems to - scale well.
- author: - - first: Georg - last: Ay - middle: Martius AND Ralf Der AND Nihat - doi: 10.1371/journal.pone.0063400 - journal: PLoS ONE - month: '05' - number: '5' - pages: e63400 - publisher: Public Library of Science - title: Information Driven Self-Organization of Complex Robotic Behaviors - type: article - url: http://dx.doi.org/10.1371%2Fjournal.pone.0063400 - volume: '8' - year: '2013' diff --git a/tests/testrepo/bib/Page99.bib b/tests/testrepo/bib/Page99.bib new file mode 100644 index 0000000..89d2df9 --- /dev/null +++ b/tests/testrepo/bib/Page99.bib @@ -0,0 +1,13 @@ +@techreport{Page99, + number = {1999-66}, + month = {November}, + author = {Lawrence Page and Sergey Brin and Rajeev Motwani and Terry Winograd}, + note = {Previous number = SIDL-WP-1999-0120}, + title = {The PageRank Citation Ranking: Bringing Order to the Web.}, + type = {Technical Report}, + publisher = {Stanford InfoLab}, + year = {1999}, + institution = {Stanford InfoLab}, + url = {http://ilpubs.stanford.edu:8090/422/}, + abstract = {The importance of a Web page is an inherently subjective matter, which depends on the readers interests, knowledge and attitudes. But there is still much that can be said objectively about the relative importance of Web pages. This paper describes PageRank, a mathod for rating Web pages objectively and mechanically, effectively measuring the human interest and attention devoted to them. We compare PageRank to an idealized random Web surfer. We show how to efficiently compute PageRank for large numbers of pages. And, we show how to apply PageRank to search and to user navigation.} +} diff --git a/tests/testrepo/bib/Page99.bibyaml b/tests/testrepo/bib/Page99.bibyaml deleted file mode 100644 index 3e77c1c..0000000 --- a/tests/testrepo/bib/Page99.bibyaml +++ /dev/null @@ -1,28 +0,0 @@ -entries: - Page99: - abstract: The importance of a Web page is an inherently subjective matter, - which depends on the readers interests, knowledge and attitudes. 
But there - is still much that can be said objectively about the relative importance - of Web pages. This paper describes PageRank, a mathod for rating Web pages - objectively and mechanically, effectively measuring the human interest - and attention devoted to them. We compare PageRank to an idealized random - Web surfer. We show how to efficiently compute PageRank for large numbers - of pages. And, we show how to apply PageRank to search and to user navigation. - author: - - first: Lawrence - last: Page - - first: Sergey - last: Brin - - first: Rajeev - last: Motwani - - first: Terry - last: Winograd - institution: Stanford InfoLab - month: November - note: Previous number = SIDL-WP-1999-0120 - number: 1999-66 - publisher: Stanford InfoLab - title: 'The PageRank Citation Ranking: Bringing Order to the Web.' - type: techreport - url: http://ilpubs.stanford.edu:8090/422/ - year: '1999' diff --git a/tests/testrepo/bib/journal0063400.bib b/tests/testrepo/bib/journal0063400.bib new file mode 100644 index 0000000..292026f --- /dev/null +++ b/tests/testrepo/bib/journal0063400.bib @@ -0,0 +1,6 @@ +@article{10.1371/journal.pone.0063400, + author = {Martius, , Georg AND Der, , Ralf AND Ay, , Nihat}, + journal = {PLoS ONE}, + publisher = {Public Library of Science}, + title = {Information Driven Self-Organization of Complex Robotic Behaviors}, +} diff --git a/tests/testrepo/bib/journal0063400.bibyaml b/tests/testrepo/bib/journal0063400.bibyaml deleted file mode 100644 index 041a029..0000000 --- a/tests/testrepo/bib/journal0063400.bibyaml +++ /dev/null @@ -1,15 +0,0 @@ -entries: - journal0063400: - author: - - first: Lawrence - last: Page - - first: Sergey - last: Brin - - first: Rajeev - last: Motwani - - first: Terry - last: Winograd - journal: PLoS ONE - publisher: Public Library of Science - title: Information Driven Self-Organization of Complex Robotic Behaviors - type: article