Compare commits
No commits in common. "ef67582aa41a78e87ae1b02646ebed67572c82a6" and "7f2c84f53d9c0ad5e18ae24e9e512e61fdfecc47" have entirely different histories.
ef67582aa4
...
7f2c84f53d
|
@ -1,2 +0,0 @@
|
||||||
__pycache__
|
|
||||||
*.p[oc]
|
|
Binary file not shown.
|
@ -1,113 +0,0 @@
|
||||||
#!/usr/bin/env python
|
|
||||||
"""Search Dash/Zeal-compatible docset(s) for given search term."""
|
|
||||||
|
|
||||||
import argparse
|
|
||||||
import os
|
|
||||||
import pathlib
|
|
||||||
import plistlib
|
|
||||||
import sqlite3
|
|
||||||
|
|
||||||
# Well-known locations inside a Dash/Zeal ".docset" bundle.
IDX_PATH = pathlib.PurePath("Contents", "Resources", "docSet.dsidx")  # SQLite search index
DOC_PATH = pathlib.PurePath("Contents", "Resources", "Documents")  # documentation files

# Exact name lookup. "{limit:d}" is filled in via str.format() before
# execution; the search term itself is bound as a SQL parameter.
# UNICODE_NOCASE is a custom collation registered on each connection.
EXACT_SEARCH_SQL = """\
SELECT name, path
FROM searchIndex
WHERE name = ?
COLLATE UNICODE_NOCASE
LIMIT {limit:d};
"""
# Substring lookup. The caller escapes "%", "_" and "\" in the bound
# pattern, hence the ESCAPE clause ('\\' in Python source is one backslash).
LIKE_SEARCH_SQL = """\
SELECT name, path
FROM searchIndex
WHERE name LIKE ? ESCAPE '\\'
COLLATE UNICODE_NOCASE
LIMIT {limit:d};
"""
|
|
||||||
|
|
||||||
|
|
||||||
def get_docsets_dir():
    """Return the directory containing installed docsets as a ``pathlib.Path``.

    Honors the ``DASH_DOCSETS_PATH`` environment variable; otherwise falls
    back to Zeal's default location under ``XDG_DATA_HOME`` (or
    ``~/.local/share`` when that is unset).
    """
    docsets_dir = os.getenv("DASH_DOCSETS_PATH")

    if not docsets_dir:
        data_home = pathlib.Path(
            os.getenv("XDG_DATA_HOME", pathlib.Path.home() / '.local' / 'share'))
        docsets_dir = data_home / "Zeal" / "Zeal" / "docsets"

    # Bug fix: the env-var branch used to return a plain str, but callers
    # (get_docset_indices) call .iterdir() on the result — always return a Path.
    return pathlib.Path(docsets_dir)
|
|
||||||
|
|
||||||
|
|
||||||
def get_docset_indices(name=None):
    """Return sorted ``(index_path, documents_path)`` pairs for installed docsets.

    Args:
        name: optional docset bundle identifier; when given, only docsets
            whose ``CFBundleIdentifier`` equals ``name.lower()`` are included.

    Raises:
        OSError: if the docsets directory or a docset's Info.plist is unreadable.
    """
    result = []

    for p in get_docsets_dir().iterdir():
        # Docsets are directories named "<something>.docset".
        if not (p.is_dir() and p.suffix == ".docset"):
            continue

        if name:
            info_path = p / "Contents" / "Info.plist"
            with open(info_path, "rb") as fp:
                info = plistlib.load(fp)

            # Skip docsets whose bundle identifier does not match the filter.
            if info.get("CFBundleIdentifier") != name.lower():
                continue

        result.append((p / IDX_PATH, p / DOC_PATH))

    return sorted(result)
|
|
||||||
|
|
||||||
|
|
||||||
def get_docset_index(name):
    """Return the path of the search index for the docset called *name*."""
    bundle = get_docsets_dir() / f"{name}.docset"
    return bundle / IDX_PATH
|
|
||||||
|
|
||||||
|
|
||||||
# Custom collation, maybe it is more efficient to store strings casefolded
def unicode_nocase_collation(a: str, b: str) -> int:
    """SQLite collation callback: compare two strings case-insensitively.

    Uses str.casefold() for proper Unicode caseless matching. Returns 0 for
    equal strings, -1 if *a* sorts before *b*, and 1 otherwise — the contract
    required by sqlite3.Connection.create_collation().
    """
    # Casefold each operand once instead of up to four times per call.
    ca = a.casefold()
    cb = b.casefold()
    if ca == cb:
        return 0
    return -1 if ca < cb else 1
|
|
||||||
|
|
||||||
|
|
||||||
def main(args=None):
    """Command-line entry point: search docsets for the given phrase.

    Runs an exact (case-insensitive) name lookup first, then tops the result
    list up to the limit with substring (LIKE) matches, and prints
    "<n> - <name>: <file>" lines.
    """
    ap = argparse.ArgumentParser(usage=__doc__.splitlines()[0])
    ap.add_argument("-l", "--limit", type=int, default=10, metavar="INT",
                    help="Set maximum number of search results (default: %(default)i)")
    ap.add_argument("searchphrase", help="Phrase to search for. You can prefix the docset to search in separated by a colon, e.g. 'js:alert'")

    args = ap.parse_args(args)

    try:
        prefix, search = (x.strip() for x in args.searchphrase.split(":", 1))
    except (TypeError, ValueError):
        # No colon present: search all docsets.
        search = args.searchphrase.strip()
        prefix = None

    indices = get_docset_indices(prefix)

    # Build the LIKE pattern separately from the raw term: escape the LIKE
    # meta characters, then wrap in wildcards. Bug fixes vs. the original:
    # - one backslash must become TWO ("\\\\" is two chars), not four
    #   (r"\\\\" was four), matching the ESCAPE '\' clause in the SQL;
    # - the exact-match query previously received this escaped, %-wrapped
    #   pattern and could essentially never match — it now gets the raw term.
    like_pattern = search.replace("\\", "\\\\")
    like_pattern = like_pattern.replace("%", "\\%")
    like_pattern = like_pattern.replace("_", "\\_")
    like_pattern = "%" + like_pattern + "%"

    for index, docroot in indices:
        with sqlite3.connect(index) as cnx:
            cnx.create_collation("UNICODE_NOCASE", unicode_nocase_collation)
            cur = cnx.cursor()

            # First pass: exact (case-insensitive) name matches.
            cur.execute(EXACT_SEARCH_SQL.format(limit=args.limit), (search,))
            results = dict(cur.fetchall())

            # Second pass: substring matches, topping up to the limit.
            if len(results) < args.limit:
                cur.execute(LIKE_SEARCH_SQL.format(limit=args.limit), (like_pattern,))
                for name, path in cur.fetchall():
                    if name not in results:
                        results[name] = path
                        if len(results) >= args.limit:
                            break

            for i, (name, path) in enumerate(results.items(), 1):
                # Bug fix: look up each result's own path (the original loop
                # reused the stale `path` left over from the query loop).
                # Strip any in-page "#anchor" from the stored path.
                file = path.split("#", 1)[0]
                print(f"{i} - {name}: {file}")
|
|
||||||
|
|
||||||
|
|
||||||
if __name__ == '__main__':
    # Propagate main()'s return value as the process exit status
    # (SystemExit is exactly what sys.exit() raises).
    raise SystemExit(main() or 0)
|
|
|
@ -1,35 +0,0 @@
|
||||||
# employeeclass1.py
|
|
||||||
|
|
||||||
from personclass2 import Person
|
|
||||||
|
|
||||||
|
|
||||||
class Employee(Person):
    """A Person who additionally carries an employee id and a salary."""

    def __init__(self, name="", age=0, gender=None, employee_id=None, salary=0):
        # Delegate the shared person attributes to the base class.
        super().__init__(name=name, age=age, gender=gender)
        # Employee-specific state.
        self.salary = salary
        self.employee_id = employee_id

    def __str__(self):
        return f"Employee: name={self.name} employee_id={self.employee_id}"
|
|
||||||
|
|
||||||
|
|
||||||
def _dump(employee, attrs):
    # Print the employee, its class, then the named attributes in order —
    # byte-identical output to printing each one explicitly.
    print(employee)
    print(employee.__class__)
    for attr in attrs:
        print(getattr(employee, attr))


# Default-constructed employee: every attribute falls back to its default.
employee1 = Employee()
_dump(employee1, ("name", "age", "gender"))

# Fully-specified employee.
employee2 = Employee("Joe Doe", 32, gender="m", employee_id=1, salary=2000)
_dump(employee2, ("name", "age", "gender", "employee_id", "salary"))
|
|
||||||
|
|
||||||
|
|
|
@ -1,4 +1,4 @@
|
||||||
# personclass2.py
|
# personclass1.py
|
||||||
|
|
||||||
class Person:
|
class Person:
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue