@staticmethod
def str2id(string):
    """Returns a unique identifier from `string`. The type of the
    identifier is backend-specific, and this is typically implemented in
    the backend-specific subclasses.
    """
    raise NotImplementedError
def store_scan(self, fname, **kargs):
    """This method opens a scan result, and calls the appropriate
    store_scan_* method to parse (and store) the scan result.
    """
    scanid = utils.hash_file(fname, hashtype='sha256')
    if self.is_scan_present(scanid):
        utils.LOGGER.debug('Scan already present in Database (%r).', fname)
        return False
    with utils.open_file(fname) as fdesc:
        fchar = fdesc.read(1)
    try:
        store_scan_function = {
            '<': self.store_scan_xml,
            '{': self.store_scan_json,
        }[fchar]
    except KeyError:
        raise ValueError('Unknown file type %s' % fname)
    return store_scan_function(fname, filehash=scanid, **kargs)
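# Illustrative usage sketch (`db` stands for an instantiated
# backend-specific database object, and the file path is hypothetical):
#
#     if db.store_scan("/tmp/scan.xml", categories=["mynet"]):
#         print("scan result stored")
#     else:
#         print("scan result was already present")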
def store_scan_xml(self, fname, **kargs):
    """This method parses an XML scan result, displays a JSON version of
    the result, and returns True if everything went fine, False
    otherwise. In backend-specific subclasses, this method stores the
    result instead of displaying it, thanks to the `content_handler`
    attribute.
    """
    parser = xml.sax.make_parser()
    self.start_store_hosts()
    try:
        content_handler = self.content_handler(fname, **kargs)
    except Exception:
        utils.LOGGER.warning('Exception (file %r)', fname, exc_info=True)
    else:
        parser.setContentHandler(content_handler)
        parser.setEntityResolver(xmlnmap.NoExtResolver())
        parser.parse(utils.open_file(fname))
        if self.output_function is not None:
            self.output_function(content_handler._db, out=self.output)
        self.stop_store_hosts()
        return True
    self.stop_store_hosts()
    return False
def merge_host(self, host):
    """Attempt to merge `host` with an existing record.

    Return `True` if another record for the same address (and source if
    `host['source']` exists) has been found, merged and the resulting
    document inserted in the database, `False` otherwise (in that case,
    it is the caller's responsibility to add `host` to the database if
    necessary).
    """
    try:
        flt = self.searchhost(host['addr'])
        if host.get('source'):
            flt = self.flt_and(flt, self.searchsource(host['source']))
        rec = self.get(flt)[0]
    except IndexError:
        return False
    self.store_host(self.merge_host_docs(rec, host))
    self.remove(rec)
    return True
def start_store_hosts(self):
    """Backend-specific subclasses may use this method to create some
    bulk insert structures.
    """
    pass

def stop_store_hosts(self):
    """Backend-specific subclasses may use this method to commit bulk
    insert structures.
    """
    pass
def store_scan_json(self, fname, filehash=None, needports=False,
                    needopenports=False, categories=None, source=None,
                    gettoarchive=None, add_addr_infos=True,
                    force_info=False, merge=False, **_):
    """This method parses a JSON scan result as exported using `ivre
    scancli --json > file`, displays the parsing result, and returns True
    if everything went fine, False otherwise. In backend-specific
    subclasses, this method stores the result instead of displaying it,
    thanks to the `store_host` method.
    """
    if categories is None:
        categories = []
    scan_doc_saved = False
    self.start_store_hosts()
    with utils.open_file(fname) as fdesc:
        for line in fdesc:
            host = self.json2dbrec(json.loads(line.decode()))
            for fname in ['_id']:
                if fname in host:
                    del host[fname]
            host['scanid'] = filehash
            if categories:
                host['categories'] = categories
            if source is not None:
                host['source'] = source
            if add_addr_infos and self.globaldb is not None and (
                    force_info or 'infos' not in host or not host['infos']
            ):
                host['infos'] = {}
                for func in [self.globaldb.data.country_byip,
                             self.globaldb.data.as_byip,
                             self.globaldb.data.location_byip]:
                    data = func(host['addr'])
                    if data:
                        host['infos'].update(data)
            if ((not needports or 'ports' in host)
                    and (not needopenports
                         or host.get('openports', {}).get('count'))):
                while host.get('schema_version') \
                        in self.__schema_migrations['hosts']:
                    oldvers = host.get('schema_version')
                    self.__schema_migrations['hosts'][oldvers][1](host)
                    if oldvers == host.get('schema_version'):
                        utils.LOGGER.warning(
                            '[%r] could not migrate host from version '
                            '%r [%r]', self.__class__, oldvers, host,
                        )
                        break
                if not scan_doc_saved:
                    self.store_scan_doc({'_id': filehash})
                    scan_doc_saved = True
                if merge and self.merge_host(host):
                    pass
                else:
                    self.archive_from_func(host, gettoarchive)
                    self.store_host(host)
    self.stop_store_hosts()
    return True
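# The input file is expected to contain one JSON document per line, as
# produced by `ivre scancli --json`. A minimal, illustrative record could
# look like this (fields abridged; the MongoDB backend stores addresses
# as integers):
#
#     {"addr": 3232235777, "schema_version": 8,
#      "ports": [{"port": 80, "protocol": "tcp", "state_state": "open"}]}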
@staticmethod
def getscreenshot(port):
    """Returns the content of a port's screenshot."""
    url = port.get('screenshot')
    if url is None:
        return None
    if url == 'field':
        return port.get('screendata')
def migrate_schema(self, archive, version):
    """Implemented in backend-specific classes."""
    pass
@classmethod
def __migrate_schema_hosts_0_1(cls, doc):
    """Converts a record from version 0 (no "schema_version" key in the
    document) to version 1 (`doc["schema_version"] == 1`). Version 1 adds
    an "openports" nested document to ease searches based on open ports.
    """
    assert 'schema_version' not in doc
    assert 'openports' not in doc
    doc['schema_version'] = 1
    openports = {'count': 0}
    for port in doc.get('ports', []):
        if port.get('state_state') == 'open':
            openports.setdefault(port['protocol'], {}).setdefault(
                'ports', []).append(port['port'])
        if 'screenshot' in port and 'screenwords' not in port:
            screenwords = utils.screenwords(cls.getscreenshot(port))
            if screenwords is not None:
                port['screenwords'] = screenwords
    for proto in list(openports):
        if proto == 'count':
            continue
        count = len(openports[proto]['ports'])
        openports[proto]['count'] = count
        openports['count'] += count
    doc['openports'] = openports
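# Illustrative "openports" structure built by this migration: a host with
# TCP ports 22 and 80 open and UDP port 53 open ends up with
#
#     {"count": 3,
#      "tcp": {"count": 2, "ports": [22, 80]},
#      "udp": {"count": 1, "ports": [53]}}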
@staticmethod
def __migrate_schema_hosts_1_2(doc):
    """Converts a record from version 1 to version 2. Version 2 discards
    service names when they have been found from the nmap-services file.
    """
    assert doc['schema_version'] == 1
    doc['schema_version'] = 2
    for port in doc.get('ports', []):
        if port.get('service_method') == 'table':
            for key in list(port):
                if key.startswith('service_'):
                    del port[key]
@staticmethod
def __migrate_schema_hosts_2_3(doc):
    """Converts a record from version 2 to version 3. Version 3 uses new
    Nmap structured data for scripts using the ls library.
    """
    assert doc['schema_version'] == 2
    doc['schema_version'] = 3
    migrate_scripts = set(['afp-ls', 'nfs-ls', 'smb-ls', 'ftp-anon',
                           'http-ls'])
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] in migrate_scripts:
                if script['id'] in script:
                    script['ls'] = xmlnmap.change_ls(
                        script.pop(script['id']))
                elif 'ls' not in script:
                    data = xmlnmap.add_ls_data(script)
                    if data is not None:
                        script['ls'] = data
    for script in doc.get('scripts', []):
        if script['id'] in migrate_scripts:
            data = xmlnmap.add_ls_data(script)
            if data is not None:
                script['ls'] = data
@staticmethod
def __migrate_schema_hosts_3_4(doc):
    """Converts a record from version 3 to version 4. Version 4 creates a
    "fake" port entry to store host scripts.
    """
    assert doc['schema_version'] == 3
    doc['schema_version'] = 4
    if 'scripts' in doc:
        doc.setdefault('ports', []).append({
            'port': 'host',
            'scripts': doc.pop('scripts'),
        })
@staticmethod
def __migrate_schema_hosts_4_5(doc):
    """Converts a record from version 4 to version 5. Version 5 uses the
    magic value -1 instead of "host" for "port" in the "fake" port entry
    used to store host scripts (see `migrate_schema_hosts_3_4()`).
    Moreover, it changes the structure of the values of "extraports" from
    [totalcount, {"state": count}] to {"total": totalcount, "state":
    count}.
    """
    assert doc['schema_version'] == 4
    doc['schema_version'] = 5
    for port in doc.get('ports', []):
        if port['port'] == 'host':
            port['port'] = -1
    for state, (total, counts) in list(
            viewitems(doc.get('extraports', {}))):
        doc['extraports'][state] = {'total': total, 'reasons': counts}
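# Illustrative "extraports" transformation performed by this migration
# (note that the per-reason counters end up under a "reasons" key):
#
#     before: {"filtered": [995, {"no-responses": 995}]}
#     after:  {"filtered": {"total": 995,
#                           "reasons": {"no-responses": 995}}}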
@staticmethod
def __migrate_schema_hosts_5_6(doc):
    """Converts a record from version 5 to version 6. Version 6 uses Nmap
    structured data for scripts using the vulns NSE library.
    """
    assert doc['schema_version'] == 5
    doc['schema_version'] = 6
    migrate_scripts = set(
        script for script, alias in viewitems(xmlnmap.ALIASES_TABLE_ELEMS)
        if alias == 'vulns'
    )
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] in migrate_scripts:
                table = None
                if script['id'] in script:
                    table = script.pop(script['id'])
                    script['vulns'] = table
                elif 'vulns' in script:
                    table = script['vulns']
                else:
                    continue
                newtable = xmlnmap.change_vulns(table)
                if newtable != table:
                    script['vulns'] = newtable
@staticmethod
def __migrate_schema_hosts_6_7(doc):
    """Converts a record from version 6 to version 7. Version 7 creates a
    structured output for the mongodb-databases script.
    """
    assert doc['schema_version'] == 6
    doc['schema_version'] = 7
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] == 'mongodb-databases':
                if 'mongodb-databases' not in script:
                    data = xmlnmap.add_mongodb_databases_data(script)
                    if data is not None:
                        script['mongodb-databases'] = data
@staticmethod
def __migrate_schema_hosts_7_8(doc):
    """Converts a record from version 7 to version 8. Version 8 fixes the
    structured output for scripts using the vulns NSE library.
    """
    assert doc['schema_version'] == 7
    doc['schema_version'] = 8
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if 'vulns' in script:
                if any(elt in script['vulns']
                       for elt in ['ids', 'refs', 'description', 'state',
                                   'title']):
                    script['vulns'] = [script['vulns']]
                else:
                    script['vulns'] = [
                        dict(tab, id=vulnid)
                        for vulnid, tab in viewitems(script['vulns'])
                    ]
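# Illustrative effect of this migration on a script's "vulns" value, with
# a hypothetical vulnerability identifier:
#
#     before: {"CVE-0000-0000": {"state": "VULNERABLE"}}
#     after:  [{"id": "CVE-0000-0000", "state": "VULNERABLE"}]
#
# A table that already contains one of the expected keys ("ids", "refs",
# "description", "state", "title") is simply wrapped in a list instead.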
def get_mean_open_ports(self, flt, archive=False):
    """This method returns for a specific query `flt` a list of
    dictionary objects whose keys are `id` and `mean`; the value for `id`
    is backend-dependent and uniquely identifies a record, and the value
    for `mean` is given by:

        (number of open ports) * sum(port number for each open port)
    """
    return [
        {
            'id': self.getid(host),
            'mean': reduce(
                lambda x, y: x * y,
                reduce(
                    lambda x, y: (x[0] + y[0], x[1] + y[1]),
                    ((1, port['port'])
                     for port in host.get('ports', [])
                     if port['state_state'] == 'open'),
                    (0, 0),
                ),
            ),
        }
        for host in self.get(flt, archive=archive, fields=['ports'])
    ]
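# Worked example: for a host with open ports 22 and 80, the inner reduce
# yields (2, 102) -- (number of open ports, sum of open port numbers) --
# and the outer reduce multiplies the two: mean = 2 * 102 = 204.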
def searchsshkey(self, fingerprint=None, key=None, keytype=None,
                 bits=None, output=None):
    """Search SSH host keys."""
    params = {'name': 'ssh-hostkey'}
    if fingerprint is not None:
        if not isinstance(fingerprint, utils.REGEXP_T):
            fingerprint = fingerprint.replace(':', '').lower()
        params.setdefault('values', {})['fingerprint'] = fingerprint
    if key is not None:
        params.setdefault('values', {})['key'] = key
    if keytype is not None:
        params.setdefault('values', {})['type'] = keytype
    if bits is not None:
        params.setdefault('values', {})['bits'] = bits
    if output is not None:
        params['output'] = output
    return self.searchscript(**params)
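# Illustrative usage (the fingerprint value is hypothetical; plain string
# fingerprints have their colons stripped and are lowercased before
# being matched):
#
#     db.searchsshkey(keytype='ssh-rsa')
#     db.searchsshkey(fingerprint='AA:BB:CC')  # matched as 'aabbcc'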
@classmethod
def searchsmb(cls, **args):
    """Search particular results from the smb-os-discovery host script.

    Example: .searchsmb(os="Windows 5.1", workgroup="WORKGROUP\\x00")
    """
    if 'dnsdomain' in args:
        args['domain_dns'] = args.pop('dnsdomain')
    if 'forest' in args:
        args['forest_dns'] = args.pop('forest')
    return cls.searchscript(name='smb-os-discovery', values=args)
def insert_or_update_bulk(self, specs, getinfos=None):
    """Like `.insert_or_update()`, but the `specs` parameter has to be an
    iterable of (timestamp, spec) values. This generic implementation
    does not use the bulk capacity of the underlying DB implementation
    but rather calls its `.insert_or_update()` method.
    """
    for timestamp, spec in specs:
        self.insert_or_update(timestamp, spec, getinfos=getinfos)
def add_agent(self, masterid, host, remotepath, rsync=None, source=None,
              maxwaiting=60):
    """Prepares an agent and adds it to the DB using
    `self._add_agent()`.
    """
    if rsync is None:
        rsync = ['rsync']
    if not remotepath.endswith('/'):
        remotepath += '/'
    if source is None:
        source = (remotepath if host is None
                  else '%s:%s' % (host, remotepath))
    master = self.get_master(masterid)
    localpath = tempfile.mkdtemp(prefix='', dir=master['path'])
    for dirname in ['input'] + [os.path.join('remote', dname)
                                for dname in ['input', 'cur', 'output']]:
        utils.makedirs(os.path.join(localpath, dirname))
    agent = {
        'host': host,
        'path': {'remote': remotepath, 'local': localpath},
        'source': source,
        'rsync': rsync,
        'maxwaiting': maxwaiting,
        'scan': None,
        'sync': True,
        'master': masterid,
    }
    return self._add_agent(agent)
def add_agent_from_string(self, masterid, string, source=None,
                          maxwaiting=60):
    """Adds an agent from a description string of the form
    [tor:][hostname:]path.
    """
    string = string.split(':', 1)
    if string[0].lower() == 'tor':
        string = string[1].split(':', 1)
        rsync = ['torify', 'rsync']
    else:
        rsync = None
    if len(string) == 1:
        return self.add_agent(masterid, None, string[0], rsync=rsync,
                              source=source, maxwaiting=maxwaiting)
    return self.add_agent(masterid, string[0], string[1], rsync=rsync,
                          source=source, maxwaiting=maxwaiting)
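# Illustrative description strings accepted by this method (the paths and
# hostname are hypothetical):
#
#     "/agent/path"              -> local agent
#     "scanner:/agent/path"      -> remote agent, over rsync
#     "tor:scanner:/agent/path"  -> remote agent, rsync through torify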
def may_receive(self, agentid):
    """Returns the number of targets that can be added to an agent
    without exceeding its `maxwaiting` limit (the returned value cannot
    be negative).
    """
    agent = self.get_agent(agentid)
    return max(agent['maxwaiting'] - self.count_waiting_targets(agentid),
               0)

def count_waiting_targets(self, agentid):
    """Returns the number of waiting targets an agent has."""
    agent = self.get_agent(agentid)
    return sum(
        len(os.listdir(self.get_local_path(agent, path)))
        for path in ['input', os.path.join('remote', 'input')]
    )
def count_current_targets(self, agentid):
    """Returns the number of current targets an agent has."""
    agent = self.get_agent(agentid)
    return sum(
        1 for fname in os.listdir(
            self.get_local_path(agent, os.path.join('remote', 'cur')))
        if fname.endswith('.xml')
    )
def _add_agent(self, agent):
    """Adds an agent and returns its (backend-specific) unique
    identifier. This is implemented in the backend-specific class.
    """
    raise NotImplementedError

def get_agent(self, agentid):
    """Gets an agent from its (backend-specific) unique identifier. This
    is implemented in the backend-specific class.
    """
    raise NotImplementedError

def del_agent(self, agentid, wait_results=True):
    """Removes an agent from its (backend-specific) unique identifier."""
    agent = self.get_agent(agentid)
    master = self.get_master(agent['master'])
    self.unassign_agent(agentid, dont_reuse=True)
    path = self.get_local_path(agent, 'input')
    dstdir = os.path.join(master['path'], 'onhold')
    for fname in os.listdir(path):
        shutil.move(os.path.join(path, fname), dstdir)
    if wait_results:
        self.sync(agentid)
def _del_agent(self, agentid):
    """Removes an agent's database entry from its (backend-specific)
    unique identifier. This is implemented in the backend-specific class.
    """
    raise NotImplementedError

def add_master(self, hostname, path):
    """Prepares a master and adds it to the DB using
    `self._add_master()`.
    """
    master = {'hostname': hostname, 'path': path}
    return self._add_master(master)

def _add_master(self, master):
    """Adds a master and returns its (backend-specific) unique
    identifier. This is implemented in the backend-specific class.
    """
    raise NotImplementedError
def get_hint(self, spec):
    """Given a query spec, return an appropriate index in a form suitable
    to be passed to Cursor.hint().
    """
    for fieldname, hint in viewitems(self.hint_indexes):
        if fieldname in spec:
            return hint
@property
def db_client(self):
    """The DB connection."""
    try:
        return self._db_client
    except AttributeError:
        self._db_client = pymongo.MongoClient(
            host=self.host,
            read_preference=pymongo.ReadPreference.SECONDARY_PREFERRED,
        )
        return self._db_client
@property
def db(self):
    """The DB."""
    try:
        return self._db
    except AttributeError:
        self._db = self.db_client[self.dbname]
        if self.username is not None:
            if self.password is not None:
                self.db.authenticate(self.username, self.password)
            elif self.mechanism is not None:
                self.db.authenticate(self.username,
                                     mechanism=self.mechanism)
            else:
                raise TypeError("provide either 'password' or "
                                "'mechanism' with 'username'")
        return self._db
@property
def server_info(self):
    """Server information."""
    try:
        return self._server_info
    except AttributeError:
        self._server_info = self.db_client.server_info()
        return self._server_info
@property
def find(self):
    """Wrapper around the collection .find() method, depending on the
    pymongo version.
    """
    try:
        return self._find
    except AttributeError:
        if pymongo.version_tuple[0] > 2:
            def _find(colname, *args, **kargs):
                if 'spec' in kargs:
                    kargs['filter'] = kargs.pop('spec')
                if 'fields' in kargs:
                    kargs['projection'] = kargs.pop('fields')
                return self.db[colname].find(*args, **kargs)
            self._find = _find
        else:
            def _find(colname, *args, **kargs):
                return self.db[colname].find(*args, **kargs)
            self._find = _find
        return self._find
@property
def find_one(self):
    """Wrapper around the collection .find_one() method, depending on the
    pymongo version.
    """
    try:
        return self._find_one
    except AttributeError:
        if pymongo.version_tuple[0] > 2:
            def _find_one(colname, *args, **kargs):
                if 'spec_or_id' in kargs:
                    kargs['filter_or_id'] = kargs.pop('spec_or_id')
                if 'fields' in kargs:
                    kargs['projection'] = kargs.pop('fields')
                return self.db[colname].find_one(*args, **kargs)
            self._find_one = _find_one
        else:
            def _find_one(colname, *args, **kargs):
                return self.db[colname].find_one(*args, **kargs)
            self._find_one = _find_one
        return self._find_one
def migrate_schema(self, colname, version):
    """Performs the schema migrations in column `colname`, starting from
    `version`.
    """
    failed = 0
    while version in self.schema_migrations[colname]:
        updated = False
        new_version, migration_function = \
            self.schema_migrations[colname][version]
        utils.LOGGER.info('Migrating column %s from version %r to %r',
                          colname, version, new_version)
        for record in self.find(colname, self.searchversion(version)):
            try:
                update = migration_function(record)
            except Exception as exc:
                utils.LOGGER.warning(
                    'Cannot migrate host %s [%s: %s]', record['_id'],
                    exc.__class__.__name__, exc.message,
                )
                failed += 1
            else:
                if update is not None:
                    updated = True
                    self.db[colname].update({'_id': record['_id']},
                                            update)
        if updated:
            for action, indexes in viewitems(
                    self.schema_migrations_indexes[colname].get(
                        new_version, {})):
                function = getattr(self.db[colname], '%s_index' % action)
                for idx in indexes:
                    try:
                        function(idx[0], **idx[1])
                    except pymongo.errors.OperationFailure as exc:
                        utils.LOGGER.warning(
                            'Cannot %s index %s [%s: %s]', action, idx,
                            exc.__class__.__name__, exc.message,
                        )
        utils.LOGGER.info(
            'Migration of column %s from version %r to %r DONE',
            colname, version, new_version,
        )
        version = new_version
    if failed:
        utils.LOGGER.info('Failed to migrate %d documents', failed)
def cmp_schema_version(self, colname, document):
    """Returns 0 if the `document`'s schema version matches the code's
    current version for `colname`, -1 if it is higher (you need to update
    IVRE), and 1 if it is lower (you need to call .migrate_schema()).
    """
    val1 = self.schema_latest_versions.get(colname, 0)
    val2 = document.get('schema_version', 0)
    return (val1 > val2) - (val1 < val2)
def _topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None,
               skip=None, least=False, aggrflt=None, specialproj=None,
               specialflt=None, countfield=None):
    """This method makes use of the aggregation framework to produce top
    values for a given field.
    """
    if flt is None:
        flt = self.flt_empty
    if aggrflt is None:
        aggrflt = self.flt_empty
    if specialflt is None:
        specialflt = []
    pipeline = []
    if flt:
        pipeline += [{'$match': flt}]
    if sort is not None and (limit is not None or skip is not None):
        pipeline += [{'$sort': OrderedDict(sort)}]
    if skip is not None:
        pipeline += [{'$skip': skip}]
    if limit is not None:
        pipeline += [{'$limit': limit}]
    project = {'_id': 0, field: 1} if specialproj is None else specialproj
    if countfield is not None:
        project[countfield] = 1
    pipeline += [{'$project': project}]
    for i in range(field.count('.'), -1, -1):
        subfield = field.rsplit('.', i)[0]
        if subfield in self.needunwind:
            pipeline += [{'$unwind': '$' + subfield}]
    pipeline += specialflt
    project = {'field': '$%s' % field}
    if countfield is not None:
        project['count'] = '$%s' % countfield
    pipeline += [{'$project': project}]
    if aggrflt:
        pipeline += [{'$match': aggrflt}]
    else:
        pipeline += [{'$match': {'field': {'$exists': True}}}]
    pipeline += [{'$group': {'_id': '$field', 'count': {
        '$sum': 1 if countfield is None else '$count'}}}]
    if least:
        pipeline += [{'$sort': {'count': 1}}]
    else:
        pipeline += [{'$sort': {'count': -1}}]
    if topnbr is not None:
        pipeline += [{'$limit': topnbr}]
    return pipeline
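# Illustrative pipeline produced by _topvalues('ports.port', topnbr=10),
# assuming 'ports' is listed in self.needunwind and no filter is given:
#
#     [{'$project': {'_id': 0, 'ports.port': 1}},
#      {'$unwind': '$ports'},
#      {'$project': {'field': '$ports.port'}},
#      {'$match': {'field': {'$exists': True}}},
#      {'$group': {'_id': '$field', 'count': {'$sum': 1}}},
#      {'$sort': {'count': -1}},
#      {'$limit': 10}]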
def _distinct(self, field, flt=None, sort=None, limit=None, skip=None):
    """This method makes use of the aggregation framework to produce
    distinct values for a given field.
    """
    pipeline = []
    if flt:
        pipeline.append({'$match': flt})
    if sort:
        pipeline.append({'$sort': OrderedDict(sort)})
    if skip is not None:
        pipeline += [{'$skip': skip}]
    if limit is not None:
        pipeline += [{'$limit': limit}]
    for i in range(field.count('.'), -1, -1):
        subfield = field.rsplit('.', i)[0]
        if subfield in self.needunwind:
            pipeline += [{'$unwind': '$' + subfield}]
    pipeline.append({'$group': {'_id': '$%s' % field}})
    return pipeline
@staticmethod
def _flt_and(cond1, cond2):
    """Returns a filter which will accept results if and only if they are
    accepted by both cond1 and cond2.
    """
    cond1k = set(cond1)
    cond2k = set(cond2)
    cond = {}
    if '$and' in cond1:
        cond1k.remove('$and')
        cond['$and'] = cond1['$and']
    if '$and' in cond2:
        cond2k.remove('$and')
        cond['$and'] = cond.get('$and', []) + cond2['$and']
    for k in cond1k.difference(cond2k):
        cond[k] = cond1[k]
    for k in cond2k.difference(cond1k):
        cond[k] = cond2[k]
    for k in cond1k.intersection(cond2k):
        if cond1[k] == cond2[k]:
            cond[k] = cond1[k]
        else:
            cond['$and'] = cond.get('$and', []) + [{k: cond1[k]},
                                                   {k: cond2[k]}]
    return cond
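# Worked example: when both filters constrain the same key with different
# values, the two conditions are pushed into '$and':
#
#     _flt_and({'addr': 1}, {'addr': 2, 'state': 'up'})
#     -> {'state': 'up', '$and': [{'addr': 1}, {'addr': 2}]}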
@staticmethod
def searchobjectid(oid, neg=False):
    """Filters records by their ObjectID. `oid` can be a single object ID
    or many (as a list or any iterable), specified as strings or
    `ObjectID`s.
    """
    if isinstance(oid, (basestring, bson.objectid.ObjectId)):
        oid = [bson.objectid.ObjectId(oid)]
    else:
        oid = [bson.objectid.ObjectId(elt) for elt in oid]
    if len(oid) == 1:
        return {'_id': {'$ne': oid[0]} if neg else oid[0]}
    return {'_id': {'$nin' if neg else '$in': oid}}
@staticmethod
def searchversion(version):
    """Filters documents based on their schema's version."""
    return {'schema_version': ({'$exists': False} if version is None
                               else version)}
@staticmethod
def searchhost(addr, neg=False):
    """Filters (if `neg` == True, filters out) one particular host (IP
    address).
    """
    try:
        addr = utils.ip2int(addr)
    except (TypeError, utils.socket.error):
        pass
    return {'addr': {'$ne': addr} if neg else addr}
@staticmethod
def searchrange(start, stop, neg=False):
    """Filters (if `neg` == True, filters out) one particular IP address
    range.
    """
    try:
        start = utils.ip2int(start)
    except (TypeError, utils.socket.error):
        pass
    try:
        stop = utils.ip2int(stop)
    except (TypeError, utils.socket.error):
        pass
    if neg:
        return {'$or': [{'addr': {'$lt': start}},
                        {'addr': {'$gt': stop}}]}
    return {'addr': {'$gte': start, '$lte': stop}}
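# Illustrative filter produced for a /24 range (addresses are converted
# to integers first):
#
#     searchrange('192.168.0.0', '192.168.0.255')
#     -> {'addr': {'$gte': 3232235520, '$lte': 3232235775}}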
'Initializes the "active" columns, i.e., drops those columns and creates the default indexes.'
def init(self):
self.db[self.colname_scans].drop() self.db[self.colname_hosts].drop() self.db[self.colname_oldscans].drop() self.db[self.colname_oldhosts].drop() self.create_indexes()
def cmp_schema_version_host(self, host):
    """Returns 0 if the `host`'s schema version matches the code's
    current version, -1 if it is higher (you need to update IVRE), and 1
    if it is lower (you need to call .migrate_schema()).
    """
    return self.cmp_schema_version(self.colname_hosts, host)

def cmp_schema_version_scan(self, scan):
    """Returns 0 if the `scan`'s schema version matches the code's
    current version, -1 if it is higher (you need to update IVRE), and 1
    if it is lower (you need to call .migrate_schema()).
    """
    return self.cmp_schema_version(self.colname_scans, scan)
def migrate_schema(self, archive, version):
    """Performs the schema migrations in column `colname_hosts` or
    `colname_oldhosts`, depending on the `archive` value, starting from
    `version`.
    """
    MongoDB.migrate_schema(
        self,
        self.colname_oldhosts if archive else self.colname_hosts,
        version,
    )
def migrate_schema_hosts_0_1(self, doc):
    """Converts a record from version 0 (no "schema_version" key in the
    document) to version 1 (`doc["schema_version"] == 1`). Version 1 adds
    an "openports" nested document to ease searches based on open ports.
    """
    assert 'schema_version' not in doc
    assert 'openports' not in doc
    update = {'$set': {'schema_version': 1}}
    updated_ports = False
    openports = {}
    for port in doc.get('ports', []):
        if port.get('state_state') == 'open':
            openports.setdefault(port['protocol'], {}).setdefault(
                'ports', []).append(port['port'])
        if 'screenshot' in port and 'screenwords' not in port:
            screenwords = utils.screenwords(self.getscreenshot(port))
            if screenwords is not None:
                port['screenwords'] = screenwords
                updated_ports = True
    for proto in list(openports):
        count = len(openports[proto]['ports'])
        openports[proto]['count'] = count
        openports['count'] = openports.get('count', 0) + count
    if not openports:
        openports['count'] = 0
    if updated_ports:
        update['$set']['ports'] = doc['ports']
    update['$set']['openports'] = openports
    return update
@staticmethod
def migrate_schema_hosts_1_2(doc):
    """Converts a record from version 1 to version 2. Version 2 discards
    service names when they have been found from the nmap-services file.
    """
    assert doc['schema_version'] == 1
    update = {'$set': {'schema_version': 2}}
    update_ports = False
    for port in doc.get('ports', []):
        if port.get('service_method') == 'table':
            update_ports = True
            for key in list(port):
                if key.startswith('service_'):
                    del port[key]
    if update_ports:
        update['$set']['ports'] = doc['ports']
    return update
@staticmethod
def migrate_schema_hosts_2_3(doc):
    """Converts a record from version 2 to version 3. Version 3 uses new
    Nmap structured data for scripts using the ls library.
    """
    assert doc['schema_version'] == 2
    update = {'$set': {'schema_version': 3}}
    updated_ports = False
    updated_scripts = False
    migrate_scripts = set(['afp-ls', 'nfs-ls', 'smb-ls', 'ftp-anon',
                           'http-ls'])
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] in migrate_scripts:
                if script['id'] in script:
                    script['ls'] = xmlnmap.change_ls(
                        script.pop(script['id']))
                    updated_ports = True
                elif 'ls' not in script:
                    data = xmlnmap.add_ls_data(script)
                    if data is not None:
                        script['ls'] = data
                        updated_ports = True
    for script in doc.get('scripts', []):
        if script['id'] in migrate_scripts:
            data = xmlnmap.add_ls_data(script)
            if data is not None:
                script['ls'] = data
                updated_scripts = True
    if updated_ports:
        update['$set']['ports'] = doc['ports']
    if updated_scripts:
        update['$set']['scripts'] = doc['scripts']
    return update
@staticmethod
def migrate_schema_hosts_3_4(doc):
    """Converts a record from version 3 to version 4. Version 4 creates a
    "fake" port entry to store host scripts.
    """
    assert doc['schema_version'] == 3
    update = {'$set': {'schema_version': 4}}
    if 'scripts' in doc:
        doc.setdefault('ports', []).append({
            'port': 'host',
            'scripts': doc.pop('scripts'),
        })
        update['$set']['ports'] = doc['ports']
        update['$unset'] = {'scripts': True}
    return update
@staticmethod
def migrate_schema_hosts_4_5(doc):
    """Converts a record from version 4 to version 5. Version 5 uses the
    magic value -1 instead of "host" for "port" in the "fake" port entry
    used to store host scripts (see `migrate_schema_hosts_3_4()`).
    Moreover, it changes the structure of the values of "extraports" from
    [totalcount, {"state": count}] to {"total": totalcount, "state":
    count}.
    """
    assert doc['schema_version'] == 4
    update = {'$set': {'schema_version': 5}}
    updated_ports = False
    updated_extraports = False
    for port in doc.get('ports', []):
        if port['port'] == 'host':
            port['port'] = -1
            updated_ports = True
    if updated_ports:
        update['$set']['ports'] = doc['ports']
    for state, (total, counts) in list(
            viewitems(doc.get('extraports', {}))):
        doc['extraports'][state] = {'total': total, 'reasons': counts}
        updated_extraports = True
    if updated_extraports:
        update['$set']['extraports'] = doc['extraports']
    return update
@staticmethod
def migrate_schema_hosts_5_6(doc):
    """Converts a record from version 5 to version 6. Version 6 uses Nmap
    structured data for scripts using the vulns NSE library.
    """
    assert doc['schema_version'] == 5
    update = {'$set': {'schema_version': 6}}
    updated = False
    migrate_scripts = set(
        script for script, alias in viewitems(xmlnmap.ALIASES_TABLE_ELEMS)
        if alias == 'vulns'
    )
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] in migrate_scripts:
                table = None
                if script['id'] in script:
                    table = script.pop(script['id'])
                    script['vulns'] = table
                    updated = True
                elif 'vulns' in script:
                    table = script['vulns']
                else:
                    continue
                newtable = xmlnmap.change_vulns(table)
                if newtable != table:
                    script['vulns'] = newtable
                    updated = True
    if updated:
        update['$set']['ports'] = doc['ports']
    return update
@staticmethod
def migrate_schema_hosts_6_7(doc):
    """Converts a record from version 6 to version 7. Version 7 creates a
    structured output for the mongodb-databases script.
    """
    assert doc['schema_version'] == 6
    update = {'$set': {'schema_version': 7}}
    updated = False
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if script['id'] == 'mongodb-databases':
                if 'mongodb-databases' not in script:
                    data = xmlnmap.add_mongodb_databases_data(script)
                    if data is not None:
                        script['mongodb-databases'] = data
                        updated = True
    if updated:
        update['$set']['ports'] = doc['ports']
    return update
@staticmethod
def migrate_schema_hosts_7_8(doc):
    """Converts a record from version 7 to version 8. Version 8 fixes the
    structured output for scripts using the vulns NSE library.
    """
    assert doc['schema_version'] == 7
    update = {'$set': {'schema_version': 8}}
    updated = False
    for port in doc.get('ports', []):
        for script in port.get('scripts', []):
            if 'vulns' in script:
                if any(elt in script['vulns']
                       for elt in ['ids', 'refs', 'description', 'state',
                                   'title']):
                    script['vulns'] = [script['vulns']]
                else:
                    script['vulns'] = [
                        dict(tab, id=vulnid)
                        for vulnid, tab in viewitems(script['vulns'])
                    ]
                updated = True
    if updated:
        update['$set']['ports'] = doc['ports']
    return update
def get(self, flt, archive=False, **kargs):
    """Queries the active column (the old one if "archive" is set to
    True) with the provided filter "flt", and returns a MongoDB cursor.

    This should be very fast, as no operation is done (the cursor is only
    returned). Next operations (e.g., .count(), enumeration, etc.) might
    take a long time, depending on both the operations and the filter.

    Any keyword argument other than "archive" is passed to the .find()
    method of the MongoDB column object, without any validation (and
    might have no effect if it is not expected).
    """
    return self.set_limits(self.find(
        self.colname_oldhosts if archive else self.colname_hosts,
        flt, **kargs))
def setscreenshot(self, host, port, data, protocol='tcp', archive=False,
                  overwrite=False):
    """Sets the content of a port's screenshot."""
    try:
        port = [p for p in host.get('ports', [])
                if p['port'] == port and p['protocol'] == protocol][0]
    except IndexError:
        raise KeyError('Port %s/%d does not exist' % (protocol, port))
    if 'screenshot' in port and not overwrite:
        return
    port['screenshot'] = 'field'
    trim_result = utils.trim_image(data)
    if trim_result is False:
        return
    elif trim_result is not True:
        data = trim_result
    port['screendata'] = bson.Binary(data)
    screenwords = utils.screenwords(data)
    if screenwords is not None:
        port['screenwords'] = screenwords
    self.db[self.colname_oldhosts if archive
            else self.colname_hosts].update(
        {'_id': host['_id']}, {'$set': {'ports': host['ports']}})
def setscreenwords(self, host, port=None, protocol='tcp', archive=False,
                   overwrite=False):
    """Sets the `screenwords` attribute based on the screenshot data."""
    if port is None:
        if overwrite:
            flt_cond = lambda p: 'screenshot' in p
        else:
            flt_cond = lambda p: ('screenshot' in p
                                  and 'screenwords' not in p)
    elif overwrite:
        flt_cond = lambda p: ('screenshot' in p
                              and p.get('port') == port
                              and p.get('protocol') == protocol)
    else:
        flt_cond = lambda p: ('screenshot' in p
                              and 'screenwords' not in p
                              and p.get('port') == port
                              and p.get('protocol') == protocol)
    updated = False
    for port in host.get('ports', []):
        if not flt_cond(port):
            continue
        screenwords = utils.screenwords(self.getscreenshot(port))
        if screenwords is not None:
            port['screenwords'] = screenwords
            updated = True
    if updated:
        self.db[self.colname_oldhosts if archive
                else self.colname_hosts].update(
            {'_id': host['_id']}, {'$set': {'ports': host['ports']}})
def removescreenshot(self, host, port=None, protocol='tcp',
                     archive=False):
    """Removes screenshots."""
    changed = False
    for p in host.get('ports', []):
        if port is None or (p['port'] == port
                            and p.get('protocol') == protocol):
            if 'screenshot' in p:
                if p['screenshot'] == 'field':
                    if 'screendata' in p:
                        del p['screendata']
                if 'screenwords' in p:
                    del p['screenwords']
                del p['screenshot']
                changed = True
    if changed:
        self.db[self.colname_oldhosts if archive
                else self.colname_hosts].update(
            {'_id': host['_id']}, {'$set': {'ports': host['ports']}})
def merge_host_docs(self, rec1, rec2):
    """Merge two host records and return the result. Unmergeable /
    hard-to-merge fields are lost (e.g., extraports).
    """
    if rec1.get('schema_version') != rec2.get('schema_version'):
        raise ValueError('Cannot merge host documents. '
                         'Schema versions differ (%r != %r)' % (
                             rec1.get('schema_version'),
                             rec2.get('schema_version')))
    rec = {}
    if 'schema_version' in rec1:
        rec['schema_version'] = rec1['schema_version']
    if rec1.get('starttime') > rec2.get('starttime'):
        rec1, rec2 = rec2, rec1
    scanid = set()
    for record in [rec1, rec2]:
        scanid.update(self.getscanids(record))
    if scanid:
        if len(scanid) == 1:
            rec['scanid'] = scanid.pop()
        else:
            rec['scanid'] = list(scanid)
    for fname, function in [('starttime', min), ('endtime', max)]:
        try:
            rec[fname] = function(record[fname]
                                  for record in [rec1, rec2]
                                  if fname in record)
        except ValueError:
            pass
    rec['state'] = ('up' if rec1.get('state') == 'up'
                    else rec2.get('state'))
    if rec['state'] is None:
        del rec['state']
    rec['categories'] = list(
        set(rec1.get('categories', [])).union(rec2.get('categories', [])))
    for field in ['addr', 'source', 'os']:
        rec[field] = rec2[field] if rec2.get(field) else rec1.get(field)
        if not rec[field]:
            del rec[field]
    rec['traces'] = rec1.get('traces', []) + rec2.get('traces', [])
    rec['infos'] = {}
    for record in [rec1, rec2]:
        rec['infos'].update(record.get('infos', {}))
    hostnames = dict(((h['type'], h['name']), h.get('domains'))
                     for h in (rec1.get('hostnames', [])
                               + rec2.get('hostnames', [])))
    rec['hostnames'] = [{'type': h[0], 'name': h[1], 'domains': d}
                        for h, d in viewitems(hostnames)]
    ports = dict(((port.get('protocol'), port['port']), port.copy())
                 for port in rec2.get('ports', []))
    for port in rec1.get('ports', []):
        if (port.get('protocol'), port['port']) in ports:
            curport = ports[(port.get('protocol'), port['port'])]
            if 'scripts' in curport:
                curport['scripts'] = curport['scripts'][:]
            else:
                curport['scripts'] = []
            present_scripts = set(script['id']
                                  for script in curport['scripts'])
            for script in port.get('scripts', []):
                if script['id'] not in present_scripts:
                    curport['scripts'].append(script)
            if not curport['scripts']:
                del curport['scripts']
            if 'service_name' in port and 'service_name' not in curport:
                for key in port:
                    if key.startswith('service_'):
                        curport[key] = port[key]
        else:
            ports[(port.get('protocol'), port['port'])] = port
    rec['ports'] = list(viewvalues(ports))
    rec['openports'] = {}
    for record in [rec1, rec2]:
        for proto in record.get('openports', {}):
            if proto == 'count':
                continue
            rec['openports'].setdefault(
                proto, {}).setdefault('ports', set()).update(
                    record['openports'][proto]['ports'])
    if rec['openports']:
        for proto in list(rec['openports']):
            count = len(rec['openports'][proto]['ports'])
            rec['openports'][proto]['count'] = count
            rec['openports']['count'] = (
                rec['openports'].get('count', 0) + count)
            rec['openports'][proto]['ports'] = list(
                rec['openports'][proto]['ports'])
    else:
        rec['openports']['count'] = 0
    for field in ['traces', 'infos', 'ports']:
        if not rec[field]:
            del rec[field]
    return rec
'Removes the host "host" from the active (the old one if "archive" is set to True) column. "host" must be the host record as returned by MongoDB. If "host" has a "scanid" attribute, and if it refers to a scan that have no more host record after the deletion of "host", then the scan record is also removed.'
def remove(self, host, archive=False):
if archive: colname_hosts = self.colname_oldhosts colname_scans = self.colname_oldscans else: colname_hosts = self.colname_hosts colname_scans = self.colname_scans self.db[colname_hosts].remove(spec_or_id=host['_id']) for scanid in self.getscanids(host): if (self.find_one(colname_hosts, {'scanid': scanid}) is None): self.db[colname_scans].remove(spec_or_id=scanid)
def archive(self, host, unarchive=False):
    """Archives (when `unarchive` is True, unarchives) a given host
    record. Also (un)archives the corresponding scan and removes the scan
    from the "not archived" (or "archived") scan collection if there is
    no host left in the "not archived" (or "archived") host column.
    """
    col_from_hosts, col_from_scans, col_to_hosts, col_to_scans = (
        (self.colname_oldhosts, self.colname_oldscans,
         self.colname_hosts, self.colname_scans)
        if unarchive else
        (self.colname_hosts, self.colname_scans,
         self.colname_oldhosts, self.colname_oldscans)
    )
    if self.find_one(col_from_hosts, {'_id': host['_id']}) is None:
        utils.LOGGER.warning(
            'Cannot %sarchive: host %s does not exist in %r',
            'un' if unarchive else '', host['_id'], col_from_hosts,
        )
    self.db[col_to_hosts].insert(host)
    utils.LOGGER.debug('HOST %sARCHIVED: %s in %r',
                       'UN' if unarchive else '', host['_id'],
                       col_to_hosts)
    self.db[col_from_hosts].remove(spec_or_id=host['_id'])
    utils.LOGGER.debug('HOST REMOVED: %s from %r', host['_id'],
                       col_from_hosts)
    for scanid in self.getscanids(host):
        scan = self.find_one(col_from_scans, {'_id': scanid})
        if scan is not None:
            if self.find_one(col_to_scans, {'_id': scanid}) is None:
                self.db[col_to_scans].insert(scan)
                utils.LOGGER.debug('SCAN %sARCHIVED: %s in %r',
                                   'UN' if unarchive else '', scanid,
                                   col_to_scans)
            if self.find_one(col_from_hosts,
                             {'scanid': scanid}) is None:
                self.db[col_from_scans].remove(spec_or_id=scanid)
                utils.LOGGER.debug('SCAN REMOVED: %s in %r', scanid,
                                   col_from_scans)
def get_mean_open_ports(self, flt, archive=False):
    """This method returns for a specific query `flt` a list of
    dictionary objects whose keys are `id` and `mean`; the value for `id`
    is backend-dependent and uniquely identifies a record, and the value
    for `mean` is given by:

        (number of open ports) * sum(port number for each open port)

    This MongoDB-specific implementation uses the aggregation framework
    to have most of the work done within the DB server. However, it is
    broken for now as it does not handle hosts with no open port but with
    a ports attribute. See

      * https://stackoverflow.com/questions/23636175
      * https://stackoverflow.com/questions/22114748
    """
    aggr = []
    if flt:
        aggr += [{'$match': flt}]
    aggr += [
        {'$project': {'ports.port': 1, 'ports.state_state': 1}},
        {'$project': {'ports': {'$ifNull': ['$ports', []]}}},
        {'$redact': {'$cond': {
            'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]},
            'then': {'$cond': {
                'if': {'$eq': ['$state_state', 'open']},
                'then': '$$KEEP',
                'else': '$$PRUNE'}},
            'else': '$$DESCEND'}}},
        {'$project': {'ports': {'$cond': [{'$eq': ['$ports', []]},
                                          [0],
                                          '$ports.port']}}},
        {'$unwind': '$ports'},
        {'$group': {'_id': '$_id',
                    'count': {'$sum': 1},
                    'ports': {'$sum': '$ports'}}},
        {'$project': {'_id': 0,
                      'id': '$_id',
                      'mean': {'$multiply': ['$count', '$ports']}}},
    ]
    return self.db[self.colname_oldhosts if archive
                   else self.colname_hosts].aggregate(aggr, cursor={})
def group_by_port(self, flt, archive=False):
    """Work-in-progress function to get scan results grouped by common
    open ports.
    """
    aggr = []
    if flt:
        aggr += [{'$match': flt}]
    aggr += [
        {'$project': {'ports.port': 1, 'ports.state_state': 1}},
        {'$project': {'ports': {'$ifNull': ['$ports', []]}}},
        {'$redact': {'$cond': {
            'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]},
            'then': {'$cond': {
                'if': {'$eq': ['$state_state', 'open']},
                'then': '$$KEEP',
                'else': '$$PRUNE'}},
            'else': '$$DESCEND'}}},
        {'$project': {'ports': {'$cond': [{'$eq': ['$ports', []]},
                                          [0],
                                          '$ports.port']}}},
        {'$group': {'_id': '$ports', 'ids': {'$addToSet': '$_id'}}},
    ]
    return self.db[self.colname_oldhosts if archive
                   else self.colname_hosts].aggregate(aggr, cursor={})
@staticmethod
def searchcategory(cat, neg=False):
    """Filters (if `neg` == True, filters out) one particular category
    (records may have zero, one or more categories).
    """
    if neg:
        if isinstance(cat, utils.REGEXP_T):
            return {'categories': {'$not': cat}}
        if isinstance(cat, list):
            if len(cat) == 1:
                cat = cat[0]
            else:
                return {'categories': {'$nin': cat}}
        return {'categories': {'$ne': cat}}
    if isinstance(cat, list):
        if len(cat) == 1:
            cat = cat[0]
        else:
            return {'categories': {'$in': cat}}
    return {'categories': cat}
@staticmethod
def searchcountry(country, neg=False):
    """Filters (if `neg` == True, filters out) one particular country, or
    a list of countries.
    """
    country = utils.country_unalias(country)
    if isinstance(country, list):
        return {'infos.country_code': {'$nin' if neg else '$in': country}}
    return {'infos.country_code': {'$ne': country} if neg else country}
@staticmethod
def searchcity(city, neg=False):
    """Filters (if `neg` == True, filters out) one particular city."""
    if neg:
        if isinstance(city, utils.REGEXP_T):
            return {'infos.city': {'$not': city}}
        return {'infos.city': {'$ne': city}}
    return {'infos.city': city}
@staticmethod
def searchasnum(asnum, neg=False):
    """Filters (if `neg` == True, filters out) one or more particular AS
    number(s).
    """
    if not isinstance(asnum, basestring) and hasattr(asnum, '__iter__'):
        return {'infos.as_num': {
            '$nin' if neg else '$in': [int(val) for val in asnum]}}
    asnum = int(asnum)
    return {'infos.as_num': {'$ne': asnum} if neg else asnum}
@staticmethod
def searchasname(asname, neg=False):
    """Filters (if `neg` == True, filters out) one or more particular
    AS.
    """
    if neg:
        if isinstance(asname, utils.REGEXP_T):
            return {'infos.as_name': {'$not': asname}}
        else:
            return {'infos.as_name': {'$ne': asname}}
    return {'infos.as_name': asname}
@staticmethod
def searchsource(src, neg=False):
    """Filters (if `neg` == True, filters out) one particular source."""
    if neg:
        if isinstance(src, utils.REGEXP_T):
            return {'source': {'$not': src}}
        return {'source': {'$ne': src}}
    return {'source': src}
@staticmethod
def searchport(port, protocol='tcp', state='open', neg=False):
    """Filters (if `neg` == True, filters out) records with the specified
    protocol/port at the required state. Be aware that when a host has a
    lot of ports filtered or closed, it will not report all of them, but
    only a summary, and thus the filter might not work as expected. This
    filter will always work to find open ports.
    """
    if port == 'host':
        return {'ports.port': {'$gte': 0} if neg else -1}
    if state == 'open':
        return {'openports.%s.ports' % protocol:
                {'$ne': port} if neg else port}
    if neg:
        return {'$or': [
            {'ports': {'$elemMatch': {'port': port,
                                      'protocol': protocol,
                                      'state_state': {'$ne': state}}}},
            {'ports.port': {'$ne': port}},
        ]}
    return {'ports': {'$elemMatch': {'port': port,
                                     'protocol': protocol,
                                     'state_state': state}}}
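# Illustrative filters produced by searchport():
#
#     searchport(80)            -> {'openports.tcp.ports': 80}
#     searchport(80, neg=True)  -> {'openports.tcp.ports': {'$ne': 80}}
#     searchport(53, protocol='udp', state='filtered')
#     -> {'ports': {'$elemMatch': {'port': 53, 'protocol': 'udp',
#                                  'state_state': 'filtered'}}}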
def searchportsother(self, ports, protocol='tcp', state='open'):
    """Filters records with at least one port other than those listed in
    `ports` with state `state`.
    """
    return self.searchport(
        {'$elemMatch': {'$nin': ports}} if state == 'open'
        else {'$nin': ports},
        protocol=protocol, state=state)
@staticmethod
def searchcountopenports(minn=None, maxn=None, neg=False):
    """Filters records whose open port count is between `minn` and
    `maxn`.
    """
    assert minn is not None or maxn is not None
    flt = []
    if minn == maxn:
        return {'openports.count': {'$ne': minn} if neg else minn}
    if minn is not None:
        flt.append({'$lt' if neg else '$gte': minn})
    if maxn is not None:
        flt.append({'$gt' if neg else '$lte': maxn})
    if len(flt) == 1:
        return {'openports.count': flt[0]}
    if neg:
        return {'$or': [{'openports.count': cond} for cond in flt]}
    return {'openports.count': {'$lte': maxn, '$gte': minn}}
@staticmethod
def searchopenport(neg=False):
    """Filters records with at least one open port."""
    return {'ports.state_state': {'$nin': ['open']} if neg else 'open'}
@staticmethod
def searchservice(srv, port=None, protocol=None):
    """Search an open port with a particular service."""
    flt = {'service_name': srv}
    if port is not None:
        flt['port'] = port
    if protocol is not None:
        flt['protocol'] = protocol
    if len(flt) == 1:
        return {'ports.service_name': srv}
    return {'ports': {'$elemMatch': flt}}
@staticmethod
def searchproduct(product, version=None, service=None, port=None,
                  protocol=None):
    """Search a port with a particular `product`. It is (much) better to
    provide the `service` name and/or `port` number since those fields
    are indexed.
    """
    flt = {'service_product': product}
    if version is not None:
        flt['service_version'] = version
    if service is not None:
        flt['service_name'] = service
    if port is not None:
        flt['port'] = port
    if protocol is not None:
        flt['protocol'] = protocol
    if len(flt) == 1:
        return {'ports.service_product': product}
    return {'ports': {'$elemMatch': flt}}
@staticmethod
def searchscript(name=None, output=None, values=None):
    """Search a particular content in the scripts results."""
    req = {}
    if name is not None:
        req['id'] = name
    if output is not None:
        req['output'] = output
    if values is not None:
        if name is None:
            raise TypeError('.searchscript() needs a `name` arg '
                            'when using a `values` arg')
        for field, value in viewitems(values):
            req['%s.%s' % (xmlnmap.ALIASES_TABLE_ELEMS.get(name, name),
                           field)] = value
    if not req:
        return {'ports.scripts': {'$exists': True}}
    if len(req) == 1:
        field, value = next(iter(viewitems(req)))
        return {'ports.scripts.%s' % field: value}
    return {'ports.scripts': {'$elemMatch': req}}
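# Illustrative filter, assuming 'ssh-hostkey' is not aliased in
# xmlnmap.ALIASES_TABLE_ELEMS:
#
#     searchscript(name='ssh-hostkey', values={'bits': '2048'})
#     -> {'ports.scripts': {'$elemMatch': {'id': 'ssh-hostkey',
#                                          'ssh-hostkey.bits': '2048'}}}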
def searchfile(self, fname=None, scripts=None):
    """Search shared files from a file name (either a string or a
    regexp), only from scripts using the "ls" NSE module.
    """
    if fname is None:
        fname = {'$exists': True}
    if scripts is None:
        return {'ports.scripts.ls.volumes.files.filename': fname}
    if isinstance(scripts, basestring):
        scripts = [scripts]
    return {'ports.scripts': {'$elemMatch': {
        'id': scripts.pop() if len(scripts) == 1 else {'$in': scripts},
        'ls.volumes.files.filename': fname,
    }}}
def searchsmbshares(self, access='', hidden=None):
    """Filter SMB shares with given `access` (default: either read or
    write, accepted values 'r', 'w', 'rw').

    If `hidden` is set to `True`, look for hidden shares, for non-hidden
    if set to `False` and for both if set to `None` (this is the
    default).
    """
    access = {
        '': re.compile('^(READ|WRITE)'),
        'r': re.compile('^READ(/|$)'),
        'w': re.compile('(^|/)WRITE$'),
        'rw': 'READ/WRITE',
        'wr': 'READ/WRITE',
    }[access.lower()]
    share_type = {
        None: {'$nin': ['STYPE_IPC_HIDDEN', 'Not a file share',
                        'STYPE_IPC', 'STYPE_PRINTQ']},
        True: 'STYPE_DISKTREE_HIDDEN',
        False: 'STYPE_DISKTREE',
    }[hidden]
    return self.searchscript(
        name='smb-enum-shares',
        values={'shares': {'$elemMatch': {
            '$or': [{'%s access' % user: access}
                    for user in ['Anonymous', 'Current user']],
            'Type': share_type,
            'Share': {'$ne': 'IPC$'},
        }}},
    )
@staticmethod
def searchscreenshot(port=None, protocol='tcp', service=None, words=None,
                     neg=False):
    """Filter results with (without, when `neg == True`) a screenshot (on
    a specific `port` if specified).

    `words` can be specified as a string, a regular expression, a
    boolean, or as a list and is/are matched against the OCR results.
    When `words` is specified and `neg == True`, the result will filter
    results **with** a screenshot **without** the word(s) in the OCR
    results.
    """
    result = {'ports': {'$elemMatch': {}}}
    if words is None:
        if port is None and service is None:
            return {'ports.screenshot': {'$exists': not neg}}
        result['ports']['$elemMatch']['screenshot'] = {'$exists': not neg}
    else:
        result['ports']['$elemMatch']['screenshot'] = {'$exists': True}
        if isinstance(words, list):
            words = {'$ne' if neg else '$all': words}
        elif isinstance(words, utils.REGEXP_T):
            words = {'$not': words} if neg else words
        elif isinstance(words, bool):
            words = {'$exists': words}
        else:
            words = {'$ne': words} if neg else words
        result['ports']['$elemMatch']['screenwords'] = words
    if port is not None:
        result['ports']['$elemMatch']['port'] = port
        result['ports']['$elemMatch']['protocol'] = protocol
    if service is not None:
        result['ports']['$elemMatch']['service_name'] = service
    return result
@staticmethod
def searchcpe(cpe_type=None, vendor=None, product=None, version=None):
    """Look for a CPE by type (a, o or h), vendor, product or version
    (the part after the colon following the product). No argument will
    just check for cpe existence.
    """
    fields = [('type', cpe_type), ('vendor', vendor),
              ('product', product), ('version', version)]
    flt = dict((field, value) for field, value in fields
               if value is not None)
    nflt = len(flt)
    if nflt == 0:
        return {'cpes': {'$exists': True}}
    elif nflt == 1:
        field, value = flt.popitem()
        return {'cpes.%s' % field: value}
    else:
        return {'cpes': {'$elemMatch': flt}}
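# Illustrative filters produced by searchcpe():
#
#     searchcpe()                  -> {'cpes': {'$exists': True}}
#     searchcpe(vendor='openbsd')  -> {'cpes.vendor': 'openbsd'}
#     searchcpe(vendor='apache', product='httpd')
#     -> {'cpes': {'$elemMatch': {'vendor': 'apache',
#                                 'product': 'httpd'}}}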
'This method makes use of the aggregation framework to produce top values for a given field or pseudo-field. Pseudo-fields are: - category / asnum / country / net[:mask] - port - port:open / :closed / :filtered / :<servicename> - portlist:open / :closed / :filtered - countports:open / :closed / :filtered - service / service:<portnbr> - product / product:<portnbr> - cpe / cpe.<part> / cpe:<cpe_spec> / cpe.<part>:<cpe_spec> - devicetype / devicetype:<portnbr> - script:<scriptid> / script:<port>:<scriptid> / script:host:<scriptid> - cert.* / smb.* / sshkey.* / ike.* - modbus.* / s7.* / enip.* - mongo.dbs.* - vulns.* - screenwords - file.* / file.*:scriptid - hop'
def topvalues(self, field, flt=None, topnbr=10, sort=None, limit=None, skip=None, least=False, archive=False, aggrflt=None, specialproj=None, specialflt=None):
null_if_empty = (lambda val: (val if val else None)) outputproc = None if (flt is None): flt = self.flt_empty if (aggrflt is None): aggrflt = self.flt_empty if (specialflt is None): specialflt = [] if (field == 'category'): field = 'categories' elif (field == 'country'): flt = self.flt_and(flt, {'infos.country_code': {'$exists': True}}) field = 'infos.country_code' outputproc = (lambda x: {'count': x['count'], '_id': (x['_id'], self.globaldb.data.country_name_by_code(x['_id']))}) elif (field == 'city'): flt = self.flt_and(flt, {'infos.country_code': {'$exists': True}}, {'infos.city': {'$exists': True}}) specialproj = {'_id': 0, 'city': {'$concat': ['$infos.country_code', '###', '$infos.city']}} field = 'city' outputproc = (lambda x: {'count': x['count'], '_id': tuple(x['_id'].split('###', 1))}) elif (field == 'asnum'): flt = self.flt_and(flt, {'infos.as_num': {'$exists': True}}) field = 'infos.as_num' elif (field == 'as'): flt = self.flt_and(flt, {'infos.as_num': {'$exists': True}}) specialproj = {'_id': 0, 'as': {'$concat': [{'$toLower': '$infos.as_num'}, '###', {'$ifNull': ['$infos.as_name', '']}]}} field = 'as' outputproc = (lambda x: {'count': x['count'], '_id': ((None, None) if (x['_id'] is None) else tuple(((int(y) if (i == 0) else y) for (i, y) in enumerate(x['_id'].split('###')))))}) elif ((field == 'net') or field.startswith('net:')): field = 'addr' mask = (int(field.split(':', 1)[1]) if (':' in field) else 24) if (self.server_info['versionArray'] >= [3, 2]): specialproj = {'_id': 0, 'addr': {'$floor': {'$divide': ['$addr', (2 ** (32 - mask))]}}} else: specialproj = {'_id': 0, 'addr': {'$subtract': [{'$divide': ['$addr', (2 ** (32 - mask))]}, {'$mod': [{'$divide': ['$addr', (2 ** (32 - mask))]}, 1]}]}} outputproc = (lambda x: {'count': x['count'], '_id': ('%s/%d' % (utils.int2ip((x['_id'] * (2 ** (32 - mask)))), mask))}) elif ((field == 'port') or field.startswith('port:')): if (field == 'port'): info = {'$exists': True} flt_field = 'ports.state_state' else: info = field.split(':', 1)[1] flt_field = ('ports.%s' % ('state_state' if (info in ['open', 'filtered', 'closed']) else 'service_name')) field = 'ports.port' flt = self.flt_and(flt, {flt_field: info}) specialproj = {'_id': 0, flt_field: 1, field: 1, 'ports.protocol': 1} specialflt = [{'$match': {flt_field: info}}, {'$project': {field: {'$concat': ['$ports.protocol', '###', {'$toLower': '$ports.port'}]}}}] outputproc = (lambda x: {'count': x['count'], '_id': tuple(((int(y) if (i == 1) else y) for (i, y) in enumerate(x['_id'].split('###'))))}) elif field.startswith('portlist:'): specialproj = {'ports.port': 1, 'ports.protocol': 1, 'ports.state_state': 1} specialflt = [{'$project': {'ports.port': 1, 'ports.protocol': 1, 'ports.state_state': 1}}, {'$project': {'ports': {'$ifNull': ['$ports', []]}}}, {'$redact': {'$cond': {'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]}, 'then': {'$cond': {'if': {'$eq': ['$state_state', field.split(':', 1)[1]]}, 'then': '$$KEEP', 'else': '$$PRUNE'}}, 'else': '$$DESCEND'}}}, {'$project': {'ports.port': 1, 'ports.protocol': 1}}, {'$project': {'portlist': '$ports'}}] field = 'portlist' outputproc = (lambda x: {'count': x['count'], '_id': [(y['protocol'], y['port']) for y in x['_id']]}) elif field.startswith('countports:'): state = field.split(':', 1)[1] if (state == 'open'): field = 'openports.count' else: specialproj = {'_id': 0, 'ports.state_state': 1} specialflt = [{'$project': {'ports': {'$ifNull': ['$ports', []]}}}, {'$redact': {'$cond': {'if': {'$eq': [{'$ifNull': ['$ports', None]}, None]}, 
'then': {'$cond': {'if': {'$eq': ['$state_state', state]}, 'then': '$$KEEP', 'else': '$$PRUNE'}}, 'else': '$$DESCEND'}}}, {'$project': {'countports': {'$size': '$ports'}}}] field = 'countports' elif (field == 'service'): flt = self.flt_and(flt, self.searchopenport()) specialproj = {'_id': 0, 'ports.state_state': 1, 'ports.service_name': 1} specialflt = [{'$match': {'ports.state_state': 'open'}}, {'$project': {'ports.service_name': {'$ifNull': ['$ports.service_name', '']}}}] field = 'ports.service_name' outputproc = (lambda x: {'count': x['count'], '_id': (x['_id'] if x['_id'] else None)}) elif field.startswith('service:'): port = int(field.split(':', 1)[1]) flt = self.flt_and(flt, self.searchport(port)) specialproj = {'_id': 0, 'ports.port': 1, 'ports.service_name': 1} specialflt = [{'$match': {'ports.port': port}}, {'$project': {'ports.service_name': {'$ifNull': ['$ports.service_name', '']}}}] field = 'ports.service_name' elif (field == 'product'): flt = self.flt_and(flt, self.searchopenport()) specialproj = {'_id': 0, 'ports.state_state': 1, 'ports.service_name': 1, 'ports.service_product': 1} specialflt = [{'$match': {'ports.state_state': 'open'}}, {'$project': {'ports.service_product': {'$concat': [{'$ifNull': ['$ports.service_name', '']}, '###', {'$ifNull': ['$ports.service_product', '']}]}}}] field = 'ports.service_product' outputproc = (lambda x: {'count': x['count'], '_id': tuple(((elt if elt else None) for elt in x['_id'].split('###')))}) elif field.startswith('product:'): service = field.split(':', 1)[1] if service.isdigit(): port = int(service) flt = self.flt_and(flt, self.searchport(port)) specialflt = [{'$match': {'ports.port': port}}] else: flt = self.flt_and(flt, self.searchservice(service)) specialflt = [{'$match': {'ports.service_name': service}}] specialproj = {'_id': 0, 'ports.port': 1, 'ports.service_name': 1, 'ports.service_product': 1} specialflt.append({'$project': {'ports.service_product': {'$concat': [{'$ifNull': ['$ports.service_name', '']}, '###', {'$ifNull': ['$ports.service_product', '']}]}}}) field = 'ports.service_product' outputproc = (lambda x: {'count': x['count'], '_id': tuple(((elt if elt else None) for elt in x['_id'].split('###')))}) elif (field == 'version'): flt = self.flt_and(flt, self.searchopenport()) specialproj = {'_id': 0, 'ports.state_state': 1, 'ports.service_name': 1, 'ports.service_product': 1, 'ports.service_version': 1} specialflt = [{'$match': {'ports.state_state': 'open'}}, {'$project': {'ports.service_product': {'$concat': [{'$ifNull': ['$ports.service_name', '']}, '###', {'$ifNull': ['$ports.service_product', '']}, '###', {'$ifNull': ['$ports.service_version', '']}]}}}] field = 'ports.service_product' outputproc = (lambda x: {'count': x['count'], '_id': tuple(((elt if elt else None) for elt in x['_id'].split('###')))}) elif field.startswith('version:'): service = field.split(':', 1)[1] if service.isdigit(): port = int(service) flt = self.flt_and(flt, self.searchport(port)) specialflt = [{'$match': {'ports.port': port}}] elif (':' in service): (service, product) = service.split(':', 1) flt = self.flt_and(flt, self.searchproduct(product, service=service)) specialflt = [{'$match': {'ports.service_name': service, 'ports.service_product': product}}] else: flt = self.flt_and(flt, self.searchservice(service)) specialflt = [{'$match': {'ports.service_name': service}}] specialproj = {'_id': 0, 'ports.port': 1, 'ports.service_name': 1, 'ports.service_product': 1, 'ports.service_version': 1} specialflt.append({'$project': 
{'ports.service_product': {'$concat': [{'$ifNull': ['$ports.service_name', '']}, '###', {'$ifNull': ['$ports.service_product', '']}, '###', {'$ifNull': ['$ports.service_version', '']}]}}}) field = 'ports.service_product' outputproc = (lambda x: {'count': x['count'], '_id': tuple(((elt if elt else None) for elt in x['_id'].split('###')))}) elif field.startswith('cpe'): try: (field, cpeflt) = field.split(':', 1) cpeflt = cpeflt.split(':', 3) except ValueError: cpeflt = [] try: field = field.split('.', 1)[1] except IndexError: field = 'version' fields = ['type', 'vendor', 'product', 'version'] if (field not in fields): try: field = fields[(int(field) - 1)] except (IndexError, ValueError): field = 'version' cpeflt = list(zip(fields, (utils.str2regexp(value) for value in cpeflt))) cpeflt1 = self.searchcpe(**dict(((('cpe_type' if (key == 'type') else key), value) for (key, value) in cpeflt))) cpeflt2 = dict(((('cpes.%s' % key), value) for (key, value) in cpeflt)) fields = fields[:(max(fields.index(field), len(cpeflt)) + 1)] flt = self.flt_and(flt, cpeflt1) specialproj = dict(((('cpes.%s' % fname), 1) for fname in fields)) specialproj['_id'] = 0 concat = [('$cpes.%s' % fields[0])] for fname in fields[1:(fields.index(field) + 1)]: concat.append(':') concat.append(('$cpes.%s' % fname)) specialflt = [] if cpeflt2: specialflt.append({'$match': cpeflt2}) specialflt.append({'$project': {('cpes.%s' % field): {'$concat': concat}}}) field = ('cpes.%s' % field) outputproc = (lambda x: {'count': x['count'], '_id': tuple(x['_id'].split(':', 3))}) elif (field == 'devicetype'): field = 'ports.service_devicetype' elif field.startswith('devicetype:'): port = int(field.split(':', 1)[1]) flt = self.flt_and(flt, self.searchport(port)) specialproj = {'_id': 0, 'ports.port': 1, 'ports.service_devicetype': 1} specialflt = [{'$match': {'ports.port': port}}, {'$project': {'ports.service_devicetype': 1}}] field = 'ports.service_devicetype' elif field.startswith('smb.'): flt = self.flt_and(flt, self.searchscript(name='smb-os-discovery')) if (field == 'smb.dnsdomain'): field = 'ports.scripts.smb-os-discovery.domain_dns' elif (field == 'smb.forest'): field = 'ports.scripts.smb-os-discovery.forest_dns' else: field = ('ports.scripts.smb-os-discovery.' + field[4:]) elif (field == 'script'): flt = self.flt_and(flt, self.searchscript(name={'$exists': True})) field = 'ports.scripts.id' elif field.startswith('script:'): scriptid = field.split(':', 1)[1] flt = self.flt_and(flt, self.searchscript(name={'$exists': True})) if (':' in scriptid): (port, scriptid) = scriptid.split(':', 1) if port.isdigit(): port = int(port) flt = self.flt_and(flt, self.searchport(port)) else: (port, scriptid) = (None, field.split(':', 1)[1]) specialproj = {'_id': 0, 'ports.scripts.id': 1, 'ports.scripts.output': 1} if (port is not None): specialproj.update({'ports.port': 1}) specialflt = [{'$match': ({'ports.scripts.id': scriptid} if (port is None) else {'ports.scripts.id': scriptid, 'ports.port': port})}, {'$project': {'ports.scripts.output': 1}}] field = 'ports.scripts.output' elif (field == 'domains'): flt = self.flt_and(flt, self.searchdomain({'$exists': True})) field = 'hostnames.domains' elif field.startswith('domains:'): flt = self.flt_and(flt, self.searchdomain({'$exists': True})) level = (int(field[8:]) - 1) field = 'hostnames.domains' aggrflt = {'field': re.compile(('^([^\\.]+\\.){%d}[^\\.]+$' % level))} elif field.startswith('cert.'): subfield = field[5:] field = ('ports.scripts.ssl-cert.'
+ subfield) elif (field == 'sshkey.bits'): flt = self.flt_and(flt, self.searchsshkey()) specialproj = {'ports.scripts.ssh-hostkey.type': 1, 'ports.scripts.ssh-hostkey.bits': 1} specialflt = [{'$project': {'_id': 0, 'ports.scripts.ssh-hostkey.bits': {'$concat': ['$ports.scripts.ssh-hostkey.type', '###', '$ports.scripts.ssh-hostkey.bits']}}}] field = 'ports.scripts.ssh-hostkey.bits' outputproc = (lambda x: {'count': x['count'], '_id': tuple(x['_id'].split('###'))}) elif field.startswith('sshkey.'): flt = self.flt_and(flt, self.searchsshkey()) subfield = field[7:] field = ('ports.scripts.ssh-hostkey.' + subfield) elif (field == 'ike.vendor_ids'): flt = self.flt_and(flt, self.searchscript(name='ike-info')) specialproj = {'ports.scripts.ike-info.vendor_ids.value': 1, 'ports.scripts.ike-info.vendor_ids.name': 1} specialflt = [{'$project': {'_id': 0, 'ports.scripts.ike-info.vendor_ids': {'$concat': ['$ports.scripts.ike-info.vendor_ids.value', '###', {'$ifNull': ['$ports.scripts.ike-info.vendor_ids.name', '']}]}}}] field = 'ports.scripts.ike-info.vendor_ids' outputproc = (lambda x: {'count': x['count'], '_id': tuple((null_if_empty(val) for val in x['_id'].split('###')))}) elif (field == 'ike.transforms'): flt = self.flt_and(flt, self.searchscript(name='ike-info', values={'transforms': {'$exists': True}})) specialproj = {'ports.scripts.ike-info.transforms.Authentication': 1, 'ports.scripts.ike-info.transforms.Encryption': 1, 'ports.scripts.ike-info.transforms.GroupDesc': 1, 'ports.scripts.ike-info.transforms.Hash': 1, 'ports.scripts.ike-info.transforms.LifeDuration': 1, 'ports.scripts.ike-info.transforms.LifeType': 1} specialflt = [{'$project': {'_id': 0, 'ports.scripts.ike-info.transforms': {'$concat': [{'$ifNull': ['$ports.scripts.ike-info.transforms.Authentication', '']}, '###', {'$ifNull': ['$ports.scripts.ike-info.transforms.Encryption', '']}, '###', {'$ifNull': ['$ports.scripts.ike-info.transforms.GroupDesc', '']}, '###', {'$ifNull': ['$ports.scripts.ike-info.transforms.Hash', '']}, '###', {'$toLower': '$ports.scripts.ike-info.transforms.LifeDuration'}, '###', {'$ifNull': ['$ports.scripts.ike-info.transforms.LifeType', '']}]}}}] field = 'ports.scripts.ike-info.transforms' outputproc = (lambda x: {'count': x['count'], '_id': tuple((null_if_empty(val) for val in x['_id'].split('###')))}) elif (field == 'ike.notification'): flt = self.flt_and(flt, self.searchscript(name='ike-info', values={'notification_type': {'$exists': True}})) field = 'ports.scripts.ike-info.notification_type' elif field.startswith('ike.'): flt = self.flt_and(flt, self.searchscript(name='ike-info')) field = ('ports.scripts.ike-info.' + field[4:]) elif field.startswith('modbus.'): flt = self.flt_and(flt, self.searchscript(name='modbus-discover')) subfield = field[7:] field = ('ports.scripts.modbus-discover.' + subfield) elif field.startswith('s7.'): flt = self.flt_and(flt, self.searchscript(name='s7-info')) subfield = field[3:] field = ('ports.scripts.s7-info.' + subfield) elif field.startswith('enip.'): flt = self.flt_and(flt, self.searchscript(name='enip-info')) subfield = field[5:] subfield = {'vendor': 'Vendor', 'product': 'Product Name', 'serial': 'Serial Number', 'devtype': 'Device Type', 'prodcode': 'Product Code', 'rev': 'Revision', 'ip': 'Device IP'}.get(subfield, subfield) field = ('ports.scripts.enip-info.' + subfield) elif field.startswith('mongo.dbs.'): flt = self.flt_and(flt, self.searchscript(name='mongodb-databases')) field = ('ports.scripts.mongodb-databases.' 
+ field[10:]) elif field.startswith('vulns.'): flt = self.flt_and(flt, self.searchvuln()) subfield = field[6:] if (subfield == 'id'): field = 'ports.scripts.vulns.id' else: field = ('ports.scripts.vulns.' + subfield) specialproj = {'_id': 0, 'ports.scripts.vulns.id': 1, field: 1} specialflt = [{'$project': {'_id': 0, field: {'$concat': ['$ports.scripts.vulns.id', '###', ('$' + field)]}}}] outputproc = (lambda x: {'count': x['count'], '_id': tuple(x['_id'].split('###', 1))}) elif ((field == 'file') or (field.startswith('file') and (field[4] in '.:'))): if field.startswith('file:'): scripts = field[5:] if ('.' in scripts): (scripts, field) = scripts.split('.', 1) else: field = 'filename' scripts = scripts.split(',') else: field = (field[5:] or 'filename') scripts = None flt = self.flt_and(flt, self.searchfile(scripts=scripts)) field = ('ports.scripts.ls.volumes.files.%s' % field) if (scripts is not None): specialproj = {'_id': 0, field: 1, 'ports.scripts.id': 1} specialflt = [{'$match': {'ports.scripts.id': flt['ports.scripts']['$elemMatch']['id']}}, {'$project': {field: {'$ifNull': [('$' + field), '']}}}] else: specialflt = [{'$project': {field: {'$ifNull': [('$' + field), '']}}}] outputproc = (lambda x: {'count': x['count'], '_id': (x['_id'] if x['_id'] else None)}) elif (field == 'screenwords'): field = 'ports.screenwords' flt = self.flt_and(flt, self.searchscreenshot(words=True)) elif (field == 'hop'): field = 'traces.hops.ipaddr' outputproc = (lambda x: {'count': x['count'], '_id': utils.int2ip(x['_id'])}) elif (field.startswith('hop') and (field[3] in ':>')): specialproj = {'_id': 0, 'traces.hops.ipaddr': 1, 'traces.hops.ttl': 1} specialflt = [{'$match': {'traces.hops.ttl': (int(field[4:]) if (field[3] == ':') else {'$gt': int(field[4:])})}}, {'$project': {'traces.hops.ipaddr': 1}}] field = 'traces.hops.ipaddr' outputproc = (lambda x: {'count': x['count'], '_id': utils.int2ip(x['_id'])}) pipeline = self._topvalues(field, flt=flt, topnbr=topnbr, sort=sort, limit=limit, skip=skip, least=least, aggrflt=aggrflt, specialproj=specialproj, specialflt=specialflt) cursor = self.set_limits(self.db[(self.colname_oldhosts if archive else self.colname_hosts)].aggregate(pipeline, cursor={})) if (outputproc is not None): return (outputproc(res) for res in cursor) return cursor
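Usage sketch for the field syntaxes handled above (an assumption: the standard IVRE entry point `ivre.db.db`, which exposes this backend as `db.nmap`; adjust if your setup instantiates the class differently):

from ivre.db import db

# Top ten open ports ('port:open' branch above); outputproc turns each
# '_id' into a (protocol, port) tuple.
for res in db.nmap.topvalues('port:open', topnbr=10):
    proto, port = res['_id']
    print('%s/%d: %d hosts' % (proto, port, res['count']))

# Most common /16 networks ('net:<mask>' branch); '_id' is e.g. '10.0.0.0/16'.
for res in db.nmap.topvalues('net:16', topnbr=5):
    print('%s: %d' % (res['_id'], res['count']))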
'This method makes use of the aggregation framework to produce distinct values for a given field.'
def distinct(self, field, flt=None, sort=None, limit=None, skip=None, archive=False):
cursor = self.set_limits(self.db[(self.colname_oldhosts if archive else self.colname_hosts)].aggregate(self._distinct(field, flt=flt, sort=sort, limit=limit, skip=skip), cursor={})) return (res['_id'] for res in cursor)
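For instance (a sketch; `searchcategory()` is the same filter helper used by `diff_categories()` below, and the 'prod' category name is hypothetical):

from ivre.db import db

# One value per distinct '_id' among hosts in the 'prod' category.
flt = db.nmap.searchcategory('prod')
for code in db.nmap.distinct('infos.country_code', flt=flt):
    print(code)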
'`category1` and `category2` must be categories (provided as str or unicode objects). Returns a generator of dicts: {\'addr\': address, \'proto\': protocol, \'port\': port, \'value\': value}, where `address` is an integer (use `utils.int2ip` to get the corresponding string) and `value` is: - -1 if the port is open in category1 and not in category2, - 0 if the port is open in both category1 and category2, - 1 if the port is open in category2 and not in category1. This can be useful to compare open ports from two scan results against the same targets.'
def diff_categories(self, category1, category2, flt=None, archive=False, include_both_open=True):
category_filter = self.searchcategory([category1, category2]) pipeline = [{'$match': (category_filter if (flt is None) else self.flt_and(flt, category_filter))}, {'$unwind': '$categories'}, {'$match': category_filter}, {'$unwind': '$ports'}, {'$match': {'ports.state_state': 'open'}}, {'$project': {'_id': 0, 'addr': 1, 'ports.protocol': 1, 'ports.port': 1, 'categories': 1}}, {'$group': {'_id': {'addr': '$addr', 'proto': '$ports.protocol', 'port': '$ports.port'}, 'categories': {'$push': '$categories'}}}] cursor = self.db[(self.colname_oldhosts if archive else self.colname_hosts)].aggregate(pipeline, cursor={}) def categories_to_val(categories): (state1, state2) = ((category1 in categories), (category2 in categories)) return ((state2 > state1) - (state2 < state1)) cursor = (dict(x['_id'], value=categories_to_val(x['categories'])) for x in cursor) if include_both_open: return cursor else: return (result for result in cursor if result['value'])
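A sketch of the intended use, diffing two scans of the same targets (the category names are hypothetical):

from ivre import utils
from ivre.db import db

# With include_both_open=False, only changes are yielded:
# value == -1 means open only in 'scan-before', 1 only in 'scan-after'.
for res in db.nmap.diff_categories('scan-before', 'scan-after',
                                   include_both_open=False):
    print('%s %s/%d %s' % (utils.int2ip(res['addr']), res['proto'],
                           res['port'],
                           'closed' if res['value'] < 0 else 'opened'))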
'Update country info on existing Nmap scan result documents'
def update_country(self, start, stop, code, create=False):
name = self.globaldb.data.country_name_by_code(code) for colname in [self.colname_hosts, self.colname_oldhosts]: self.db[colname].update(self.searchrange(start, stop), {'$set': {'infos.country_code': code, 'infos.country_name': name}}, multi=True)
'Update city/location info on existing Nmap scan result documents'
def update_city(self, start, stop, locid, create=False):
updatespec = dict(((('infos.%s' % key), value) for (key, value) in viewitems(self.globaldb.data.location_byid(locid)))) if ('infos.country_code' in updatespec): updatespec['infos.country_name'] = self.globaldb.data.country_name_by_code(updatespec['infos.country_code']) for colname in [self.colname_hosts, self.colname_oldhosts]: self.db[colname].update(self.searchrange(start, stop), {'$set': updatespec}, multi=True)
'Update AS info on existing Nmap scan result documents'
def update_as(self, start, stop, asnum, asname, create=False):
if (asname is None): updatespec = {'infos.as_num': asnum} else: updatespec = {'infos.as_num': asnum, 'infos.as_name': asname} for colname in [self.colname_hosts, self.colname_oldhosts]: self.db[colname].update(self.searchrange(start, stop), {'$set': updatespec}, multi=True)
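The three `update_*` methods above share one pattern: a `searchrange(start, stop)` filter plus a multi-document `$set`, applied to both current and archived hosts. A sketch (the AS number and name are made up; addresses are passed as integers, hence `utils.ip2int`):

from ivre import utils
from ivre.db import db

# Tag every result in the documentation range 192.0.2.0/24.
db.nmap.update_as(utils.ip2int('192.0.2.0'),
                  utils.ip2int('192.0.2.255'),
                  64496, 'EXAMPLE-AS')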
'Initializes the "passive" columns, i.e., drops the columns, and creates the default indexes.'
def init(self):
self.db[self.colname_passive].drop() self.db[self.colname_ipdata].drop() self.create_indexes()
'Queries the passive column with the provided filter "spec", and returns a MongoDB cursor. This should be very fast, as no operation is done (the cursor is only returned). Next operations (e.g., .count(), enumeration, etc.) might take a long time, depending on both the operations and the filter. Any keyword argument is passed to the .find() method of the MongoDB column object, without any validation (and might have no effect if it is not expected).'
def get(self, spec, **kargs):
return self.set_limits(self.find(self.colname_passive, spec, **kargs))
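Since nothing runs until the cursor is consumed, building the query is cheap. A sketch (the spec is a plain MongoDB filter; `fields` is forwarded to `.find()`, just as `insert_or_update_mix()` below passes `fields=[]` to `get_one()`):

from ivre import utils
from ivre.db import db

# Lazy cursor over all passive records for one address (addresses are
# stored as integers in this backend).
cursor = db.passive.get({'addr': utils.ip2int('198.51.100.1')},
                        fields=['recontype', 'source', 'value'])
for rec in cursor:  # the query actually executes here
    print(rec)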
'Same function as get(), except that the .find_one() method is called instead of .find(), so the first record matching "spec" (or None) is returned. Unlike get(), this function might take a long time, depending on "spec" and the indexes set on the colname_passive column.'
def get_one(self, spec, **kargs):
return self.find_one(self.colname_passive, spec, **kargs)
'Updates the first record matching "spec" in the "passive" column, setting values according to the keyword arguments.'
def update(self, spec, **kargs):
self.db[self.colname_passive].update(spec, {'$set': kargs})
'Inserts the record "spec" into the passive column.'
def insert(self, spec, getinfos=None):
if (getinfos is not None): spec.update(getinfos(spec)) self.db[self.colname_passive].insert(spec) if ('addr' in spec): self.set_data(spec['addr'])
'Like `.insert_or_update()`, but the `specs` parameter has to be an iterable of (timestamp, spec) values. This will perform bulk MongoDB inserts with the major drawback that the `getinfos` parameter will be called (if it is not `None`) for each spec, even when the spec already exists in the database and the call is therefore unnecessary. It\'s up to you to decide whether having bulk insert is worth it or if you want to go with the regular `.insert_or_update()` method.'
def insert_or_update_bulk(self, specs, getinfos=None):
bulk = self.db[self.colname_passive].initialize_unordered_bulk_op() count = 0 try: for (timestamp, spec) in specs: if (spec is not None): updatespec = {'$inc': {'count': 1}, '$min': {'firstseen': timestamp}, '$max': {'lastseen': timestamp}} if (getinfos is not None): infos = getinfos(spec) if infos: updatespec['$setOnInsert'] = infos bulk.find(spec).upsert().update(updatespec) count += 1 if (count >= config.BULK_UPSERTS_MAXSIZE): utils.LOGGER.debug('DB:MongoDB bulk upsert: %d', count) bulk.execute() bulk = self.db[self.colname_passive].initialize_unordered_bulk_op() count = 0 except IOError: pass if (count > 0): utils.LOGGER.debug('DB:MongoDB bulk upsert: %d (final)', count) bulk.execute()
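A sketch of the expected input, an iterable of (timestamp, spec) pairs (the spec keys are illustrative passive-DNS fields, and the float timestamp assumes firstseen/lastseen are stored as numbers):

import time

from ivre.db import db

now = time.time()
specs = [
    # A repeated spec is not duplicated: the upsert increments "count"
    # and widens the firstseen/lastseen window instead.
    (now, {'recontype': 'DNS_ANSWER', 'source': 'A',
           'value': 'www.example.com', 'targetval': '198.51.100.1'}),
    (now, {'recontype': 'DNS_ANSWER', 'source': 'A',
           'value': 'www.example.com', 'targetval': '198.51.100.1'}),
]
db.passive.insert_or_update_bulk(specs)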
'Updates the first record matching "spec" (without "firstseen", "lastseen" and "count") by mixing "firstseen", "lastseen" and "count" from "spec" and from the database. This is useful to mix records from different databases.'
def insert_or_update_mix(self, spec, getinfos=None):
updatespec = {} if ('firstseen' in spec): updatespec['$min'] = {'firstseen': spec['firstseen']} del spec['firstseen'] if ('lastseen' in spec): updatespec['$max'] = {'lastseen': spec['lastseen']} del spec['lastseen'] if ('count' in spec): updatespec['$inc'] = {'count': spec['count']} del spec['count'] else: updatespec['$inc'] = {'count': 1} if ('infos' in spec): updatespec['$setOnInsert'] = {'infos': spec['infos']} del spec['infos'] if ('fullinfos' in spec): if ('$setOnInsert' in updatespec): updatespec['$setOnInsert'].update({'fullinfos': spec['fullinfos']}) else: updatespec['$setOnInsert'] = {'fullinfos': spec['fullinfos']} del spec['fullinfos'] current = self.get_one(spec, fields=[]) if current: self.db[self.colname_passive].update({'_id': current['_id']}, updatespec) else: if ((getinfos is not None) and ('$setOnInsert' not in updatespec)): infos = getinfos(spec) if infos: updatespec['$setOnInsert'] = infos self.db[self.colname_passive].update(spec, updatespec, upsert=True)
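A sketch of merging a record exported from another passive database (spec fields and timestamp format are illustrative):

from ivre import utils
from ivre.db import db

db.passive.insert_or_update_mix({
    'recontype': 'OPEN_PORT',            # illustrative spec fields
    'addr': utils.ip2int('203.0.113.7'),
    'port': 443,
    'firstseen': 1500000000,             # folded in with $min
    'lastseen': 1500003600,              # folded in with $max
    'count': 12,                         # folded in with $inc
})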
'This method makes use of the aggregation framework to produce top values for a given field. If `distinct` is True (default), the top values are computed by distinct events. If it is False, they are computed based on the "count" field.'
def topvalues(self, field, distinct=True, **kargs):
if (not distinct): kargs['countfield'] = 'count' pipeline = self._topvalues(field, **kargs) return self.set_limits(self.db[self.colname_passive].aggregate(pipeline, cursor={}))
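For instance (a sketch): with `distinct=False`, a record seen a thousand times weighs a thousand, not one.

from ivre.db import db

for res in db.passive.topvalues('recontype', distinct=False, topnbr=10):
    print('%r: %d' % (res['_id'], res['count']))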
'This method makes use of the aggregation framework to produce distinct values for a given field.'
def distinct(self, field, flt=None, sort=None, limit=None, skip=None):
cursor = self.set_limits(self.db[self.colname_passive].aggregate(self._distinct(field, flt=flt, sort=sort, limit=limit, skip=skip), cursor={})) return (res['_id'] for res in cursor)
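Mirrors the scan-results version above; a sketch with a plain MongoDB filter:

from ivre.db import db

# Distinct record sources among passive DNS answers.
for src in db.passive.distinct('source',
                               flt={'recontype': 'DNS_ANSWER'}):
    print(src)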