# coding=utf-8
# Copyright 2022 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""ImageNet-Sketch dataset."""

from tensorflow_datasets.image_classification.imagenet_sketch.imagenet_sketch import ImagenetSketch
## ## Biskit, a toolkit for the manipulation of macromolecular structures ## Copyright (C) 2004-2018 Raik Gruenberg & Johan Leckner ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License as ## published by the Free Software Foundation; either version 3 of the ## License, or any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU ## General Public License for more details. ## ## You find a copy of the GNU General Public License in the file ## license.txt along with this program; if not, write to the Free ## Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ## ## """ Parallellized AmberEntropist calculation. """ from Biskit.PVM import JobSlave import Biskit.tools as T import Biskit.settings as settings from Biskit import LogFile from Biskit.AmberEntropist import AmberEntropist from Biskit.AmberCrdEntropist import EntropistError import os, time class AmberEntropySlave( JobSlave ): """ Collect AmberEntropist jobs from AmberEntropyMaster and return result. """ def initialize(self, params): """ expects:: {'nice':int, 'ferror':str, .. } @param params: initialisation parameters passed from the master @type params: dict """ self.__dict__.update( params ) self.errorLog = LogFile( self.ferror, mode='a' ) def reportError(self, msg, id ): try: try: print msg except: pass msg = 'trouble with ' + msg s = '%s on %s, run %s\n
' % (msg, os.uname()[1], id) s += '\nError:' + T.lastError() s += '\nErrorTrace:\n' + T.lastErrorTrace() + '\n' s += '\n' self.
errorLog.add( s ) except Exception, why: f = open('ErrorReportError_AmberEntropySlave','a') f.write( str(type(why)) ) try: f.write( T.lastErrorTrace() ) except: pass f.close() def go(self, jobs): """ The calculation. @param jobs: dictionary with { int_id : str_protocol } @type jobs: dict @return: result from AmberEntropist.run() @rtype: dict """ result = {} startTime = time.time() for id, protocol in jobs.items(): try: T.flushPrint( "%s " % str(id) ) protocol.update( {'nice':self.nice} ) x = None ## free memory from previous run x = AmberEntropist( **protocol ) x.run() r = x.result if r: r['__version_AmberEntropist'] = x.version() result[ id ] = r else: result[ id ] = None except EntropistError, why: self.reportError( str(type(why)), id ) except IOError, why: self.reportError( str(why), id ) except Exception, why: self.reportError( 'ERROR '+str(type(why)), id ) print "\navg time for last %i jobs: %f s" %\ ( len(jobs), (time.time()-startTime)/len(jobs)) return result if __name__ == '__main__': import sys if len(sys.argv) == 2: nice = int(sys.argv[1]) os.nice(nice) slave = AmberEntropySlave() slave.start()
ike of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q], optional Training vectors, where n_samples in the number of samples and q is the number of response variables. copy : boolean Whether to copy X and Y, or perform in-place normalization. Returns ------- x_scores if Y is not given, (x_scores, y_scores) otherwise. """ return self.fit(X, y, **fit_params).transform(X, y) class PLSRegression(_PLS): """PLS regression PLSRegression implements the PLS 2 blocks regression known as PLS2 or PLS1 in case of one dimensional response. This class inherits from _PLS with mode="A", deflation_mode="regression", norm_y_weights=False and algorithm="nipals". Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples in the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q] Training vectors, where n_samples in the number of samples and q is the number of response variables. n_components : int, (default 2) Number of components to keep. scale : boolean, (default True) whether to scale the data max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real Tolerance used in the iterative algorithm default 1e-06. copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect Attributes ---------- `x_weights_` : array, [p, n_components] X block weights vectors. `y_weights_` : array, [q, n_components] Y block weights vectors. `x_loadings_` : array, [p, n_components] X block loadings vectors. `y_loadings_` : array, [q, n_components] Y block loadings vectors. `x_scores_` : array, [n_samples, n_components] X scores. `y_scores_` : array, [n_samples, n_components] Y scores. `x_rotations_` : array, [p, n_components] X block to latents rotations. `y_rotations_` : array, [q, n_components] Y block to latents rotations. coefs: array, [p, q] The coefficients of the linear model: Y = X coefs + Err Notes ----- For each component k, find weights u, v that optimizes: ``max corr(Xk u, Yk v) * var(Xk u) var(Yk u)``, such that ``|u| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current X score. This performs the PLS regression known as PLS2. This mode is prediction oriented. This implementation provides the same results that 3 PLS packages provided in the R language (R-project): - "mixOmics" with function pls(X, Y, mode = "regression") - "plspm " with function plsreg2(X, Y) - "pls" with function oscorespls.fit(X, Y) Examples -------- >>> from sklearn.cross_decomposition import PLSRegression >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> pls2 = PLSRegression(n_components=2) >>> pls2.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSRegression(copy=True, max_iter=500, n_components=2, scale=True, tol=1e-06) >>> Y_pred = pls2.predict(X) References ---------- Jacob A. Wegelin. A survey of Partial Least Squares (PLS) methods, with emphasis on the two-block case. 
Technical Report 371, Department of Statistics, University of Washington, Seattle, 2000. In french but still a reference: Tenenhaus, M. (1998). La regression PLS: theorie et pratique. Paris: Editions Technic. """ def __init__(self, n_components=2, scale=True, max_iter=500, tol=1e-06, copy=True): _PLS.__init__(self, n_components=n_components, scale=scale, deflation_mode="regression", mode="A", norm_y_weights=False, max_iter=max_iter, tol=tol, copy=copy) class PLSCanonical(_PLS): """ PLSCanonical implements the 2 blocks canonical PLS of the original Wold algorithm [Tenenhaus 1998] p.204, referred as PLS-C2A in [Wegelin 2000]. This class inherits from PLS with mode="A" and deflation_mode="canonical", norm_y_weights=True and algorithm="nipals", but svd should provide similar results up to numerical errors. Parameters ---------- X : array-like of predictors, shape = [n_samples, p] Training vectors, where n_samples is the number of samples and p is the number of predictors. Y : array-like of response, shape = [n_samples, q] Training vectors, where n_samples is the number of samples and q is the number of response variables. n_components : int, number of components to keep. (default 2). scale : boolean, scale data? (default True) algorithm : string, "nipals" or "svd" The algorithm used to estimate the weights. It will be called n_components times, i.e. once for each iteration of the outer loop. max_iter : an integer, (default 500) the maximum number of iterations of the NIPALS inner loop (used only if algorithm="nipals") tol : non-negative real, default 1e-06 the tolerance used in the iterative algorithm copy : boolean, default True Whether the deflation should be done on a copy. Let the default value to True unless you don't care about side effect Attributes ---------- `x_weights_` : array, shape = [p, n_components] X block weights vectors. `y_weights_` : array, shape = [q, n_components] Y block weights vectors. `x_loadings_` : array, shape = [p, n_components] X block loadings vectors. `y_loadings_` : array, shape = [q, n_components] Y block loadings vectors. `x_scores
_` : array, shape = [n_samples, n_components] X scores. `y_scores_` : array, shape = [n_samples, n_components] Y scores. `x_rotations_` : array, shape = [p, n_components] X block to latents rotations. `y_rotations_` : array, shape = [q, n_components] Y block to latents rotations. Notes ----- For each component k, find weights u, v that optimize:: max corr(Xk u, Yk v) * var(
Xk u) var(Yk u), such that ``|u| = |v| = 1`` Note that it maximizes both the correlations between the scores and the intra-block variances. The residual matrix of X (Xk+1) block is obtained by the deflation on the current X score: x_score. The residual matrix of Y (Yk+1) block is obtained by deflation on the current Y score. This performs a canonical symmetric version of the PLS regression. But slightly different than the CCA. This is mostly used for modeling. This implementation provides the same results that the "plspm" package provided in the R language (R-project), using the function plsca(X, Y). Results are equal or collinear with the function ``pls(..., mode = "canonical")`` of the "mixOmics" package. The difference relies in the fact that mixOmics implementation does not exactly implement the Wold algorithm since it does not normalize y_weights to one. Examples -------- >>> from sklearn.cross_decomposition import PLSCanonical >>> X = [[0., 0., 1.], [1.,0.,0.], [2.,2.,2.], [2.,5.,4.]] >>> Y = [[0.1, -0.2], [0.9, 1.1], [6.2, 5.9], [11.9, 12.3]] >>> plsca = PLSCanonical(n_components=2) >>> plsca.fit(X, Y) ... # doctest: +NORMALIZE_WHITESPACE PLSCanonical(
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('orders', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='order',
            name='paid',
            field=models.BooleanField(default=False),
        ),
    ]
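# A sketch of the model state implied by the AlterField above: the 'paid' flag on
# Order becomes a plain BooleanField defaulting to False. The other Order fields are
# not visible here, so the class below is illustrative only.
from django.db import models


class Order(models.Model):
    # ...other fields elided...
    paid = models.BooleanField(default=False)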
# Django settings for trywsk project. DEBUG = False TEMPLATE_DEBUG = DEBUG import os from unipath import Path PROJECT_ROOT = Path(__file__).ancestor(2) PROJECT_ROOT = os.path.join(PROJECT_ROOT,'whisk_tutorial') ADMINS = ( ('IBM jStart', 'jstart@us.ibm.com'), ) MANAGERS = ADMINS DATABASES = {} TEST_RUNNER = 'testing.DatabaselessTestRunner' SESSION_ENGINE = 'django.contrib.sessions.backends.file' # Hosts/domain names that are valid for this site; required if DEBUG is False # See https://docs.djangoproject.com/en/1.5/ref/settings/#allowed-hosts ALLOWED_HOSTS = ['*'] #!! Change to [yourhost]. DO NOT USE [*] IN PRODUCTION as there are security issues # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name # although not all choices may be available on all operating systems. # In a Windows environment this must be set to your system time zone. TIME_ZONE = 'America/New_York' # Language code for this installation. All choices can be found here: # http://www.i18nguy.com/unicode/language-identifiers.html LANGUAGE_CODE = 'en-us' SITE_ID = 1 # If you set this to False, Django will make some optimizations so as not # to load the internationalization machinery. USE_I18N = True # If you set this to False, Django will not format dates, numbers and # calendars according to the current locale. USE_L10N = True # If you set this to False, Django will not use timezone-aware datetimes. USE_TZ = True # Absolute filesystem path to the directory that will hold user-uploaded files. # Example: "/var/www/example.com/media/" MEDIA_ROOT = '' # Absolute path to the directory static files should be collected to. # Don't put anything in this directory yourself; store your static files # in apps' "static/" subdirectories and in STATICFILES_DIRS. # Example: "/var/www/example.com/static/" STATIC_ROOT = os.path.join(PROJECT_ROOT,'static') PROJECT_PATH = os.path.dirname(os.path.abspath(__file__)); # URL prefix for static files. # Example: "http://example.com/static/", "http://static.example.com/" STATIC_URL = '/static/' print(PROJECT_ROOT.child('static')) # Additional locations of static files STATICFILES_DIRS = ( PROJECT_ROOT.child('static'), # Put strings here, like "/home/html/static" or "C:/www/django/static". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. ) SECRET_KEY = "yabadabadoo" # List of finder classes that know how to find static files in # various locations. STATICFILES_FINDERS = ( 'django.contrib.staticfiles.finders.FileSystemFinder', 'django.contrib.staticfiles.finders.AppDirectoriesFinder', # 'django.contrib.staticfiles.finders.DefaultStorageFinder', ) # List of callables that know how to import templates from various sources. TEMPLATE_LOADERS = ( 'django.template.loaders.filesystem.Loader', 'django.template.loaders.app_directories.Loader', # 'django.template.loaders.eggs.Loader', ) TEMPLATE_CONTEXT_PROCESSORS = ( "django.contrib.auth.context_processors.auth", "django.core.context_processors.request", "django.core.context_processors.static", "django.core.context_processors.media" ) MIDDLEWARE_CLASSES = ( 'django.middleware.common.CommonMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', ) # Local memory caching. We only have a couple of non-dynamic pages, but they are # being generated dynamically... 
So, we might as well cache the whole thing in memory. CACHES = { 'default': { # for session data 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'cachetable', }, 'database_cache': { # for tweets 'BACKEND': 'django.core.cache.backends.db.DatabaseCache', 'LOCATION': 'dbcache1', }, 'LocMemCache': { # used for storing the mailchimp object 'BACKEND': 'django.core.cache.backends.locmem.LocMemCache', 'LOCATION': 'unique-snowflake' }, 'disk_cache': { # former tweet cache 'BACKEND': 'django.core.cache.backends.filebased.FileBasedCache', 'LOCATION': PROJECT_ROOT.child('cache'), }, } ROOT_URLCONF = 'deploy_settings.urls' # Python dotted path to the WSGI application used by Django's runserver. WSGI_APPLICATION = 'deploy_settings.wsgi.application' TEMPLATE_DIRS = ( # Put strings here, like "/home/html/django_templates" or "C:/www/django/templates". # Always use forward slashes, even on Windows. # Don't forget to use absolute paths, not relative paths. PROJECT_ROOT.child('templates'), #PROJECT_ROOT.child('_pages'), ) PREREQ_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.contrib.admin', 'markdown_deu
x', #'django_extensions', #'south' ) PROJECT_APPS = ( # 'base', ) INSTALLED_APPS = PREREQ_APPS + PROJECT_APPS # A sample l
ogging configuration. The only tangible logging # performed by this configuration is to send an email to # the site admins on every HTTP 500 error when DEBUG=False. # See http://docs.djangoproject.com/en/dev/topics/logging for # more details on how to customize your logging configuration. LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'filters': { 'require_debug_false': { '()': 'django.utils.log.RequireDebugFalse' } }, 'handlers': { 'mail_admins': { 'level': 'ERROR', 'filters': ['require_debug_false'], 'class': 'django.utils.log.AdminEmailHandler' } }, 'loggers': { 'django.request': { 'handlers': ['mail_admins'], 'level': 'ERROR', 'propagate': True, }, } } # see https://github.com/trentm/django-markdown-deux for optional markdown settings MARKDOWN_DEUX_STYLES = { "default": { "extras": { "code-friendly": None, }, "safe_mode": None, }, }
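# The CACHES block above wires up several named backends: a database-backed default
# for session data, a second database cache for tweets, an in-memory cache for the
# mailchimp object, and a file-based cache. A minimal usage sketch for one of them,
# assuming a Django version with the django.core.cache.caches registry (1.7+); older
# releases used django.core.cache.get_cache(). Key and value below are illustrative.
from django.core.cache import caches

locmem = caches['LocMemCache']                       # in-memory backend defined above
locmem.set('mailchimp_client', {'ready': True}, timeout=300)
client = locmem.get('mailchimp_client')              # returns None once the entry expires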
import os
import sys

sys.path.append(os.path.join(os.path.dirname(__file__), '../tools'))

import fasta
import genetics
import table


def main(argv):
    codon = table.codon(argv[0])
    strings = fasta.read_ordered(argv[1])
    dna = strings[0]
    introns = strings[1:]
    for intron in introns:
        dna = dna.replace(intron, '')
    print(genetics.encode_protein(genetics.dna_to_rna(dna), codon))


if __name__ == "__main__":
    main(sys.argv[1:])
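# The script above relies on the project-local fasta, genetics and table helpers. A
# standard-library-only sketch of the same splicing idea, with a deliberately tiny
# codon table so it stays short; the helper name and the three-codon table are
# illustrative assumptions, not the project's actual API.
MINI_CODON_TABLE = {'AUG': 'M', 'UUC': 'F', 'UAA': 'Stop'}  # toy subset of the real table


def splice_and_translate(dna, introns, codon_table=MINI_CODON_TABLE):
    for intron in introns:
        dna = dna.replace(intron, '')      # drop each intron, as in main() above
    rna = dna.replace('T', 'U')            # transcription
    protein = []
    for i in range(0, len(rna) - 2, 3):    # read complete codons only
        amino = codon_table.get(rna[i:i + 3])
        if amino is None or amino == 'Stop':
            break
        protein.append(amino)
    return ''.join(protein)


print(splice_and_translate('ATGTTCGGGTAA', introns=['GGG']))  # -> 'MF'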
"""Base settings shared by all environments. This is a reusable basic settings file. """ from django.conf.global_settings import * import os import sys import re # Local time zone for this installation. Choices can be found here: # http://en.wikipedia.org/wiki/List_of_tz_zones_by_name TIME_ZONE = 'GB' USE_TZ = True USE_I18N = True USE_L10N = True LANGUAGE_CODE = 'en-GB' LANGUAGES = ( ('en-GB', 'British English'), ) SITE_ID = 1 LOGIN_URL = '/login/' LOGOUT_URL = '/logout/' LOGIN_REDIRECT_URL = '/' STATIC_URL = '/static/' MEDIA_URL = '/uploads/' ADMINS = ( ('David Seddon', 'david@seddonym.me'), ) MANAGERS = ADMINS DATABASES = { 'default': { 'ENGINE': 'django.db.backends.postgresql_psycopg2', } } LOGGING = { 'version': 1, 'disable_existing_loggers': False, 'formatters': { 'standard': { 'format' : "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s", 'datefmt' : "%d/%b/%Y %H:%M:%S" }, }, 'handlers': { 'error': { 'level':'ERROR', 'class':'logging.handlers.RotatingFileHandler', # 'filename': ERROR_LOG_PATH, - filled in by handler 'maxBytes': 50000, 'backupCount': 2, 'formatter': 'standard', }, 'debug': { 'level':'DEBUG', 'class':'logging.handlers.RotatingFileHandler', # 'filename': DEBUG_LOG_PATH, - filled in by handler 'maxBytes': 50000, 'backupCount': 2, 'formatter': 'standard', }, 'mail_admins': { 'level': 'ERROR', 'class': 'django.utils.log.AdminEmailHandler', 'include_html': True, }, }, 'loggers': { 'django': { 'handlers':['error'], 'propagate': True, 'level':'DEBUG', }, 'django.request': { 'handlers': ['mail_admins', 'error'], 'level': 'ERROR', 'propagate': False, }, 'project': { 'handlers':['debug'], 'propagate': True, 'level':'DEBUG', }, } } TEMPLATE_CONTEXT_PROCESSORS += ( 'django.core.context_processors.request', ) ROOT_URLCONF = 'urls' INSTALLED_APPS = ( 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'djang
o.contrib.sites', 'django.contrib.messages', 'django.contrib.staticfiles', 'django.co
ntrib.admin', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', )
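# The base settings above deliberately leave the rotating-file handler paths unset
# ("filled in by handler"). A sketch of how an environment-specific settings module
# might build on it; the module name, import style and log locations are assumptions,
# not part of the original project.
from base import *  # noqa: F401,F403 -- assumed module name for the base settings above

ERROR_LOG_PATH = '/var/log/project/error.log'    # illustrative paths
DEBUG_LOG_PATH = '/var/log/project/debug.log'
LOGGING['handlers']['error']['filename'] = ERROR_LOG_PATH
LOGGING['handlers']['debug']['filename'] = DEBUG_LOG_PATH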
# test re.sub with unmatched groups, behaviour changed in CPython 3.5
try:
    import ure as re
except ImportError:
    try:
        import re
    except ImportError:
        print("SKIP")
        raise SystemExit

try:
    re.sub
except AttributeError:
    print("SKIP")
    raise SystemExit

# first group matches, second optional group doesn't so is replaced with a blank
print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))
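# For context on the behaviour change the test refers to: since CPython 3.5 a group
# that does not participate in the match is substituted with the empty string, while
# older versions raised an "unmatched group" error. Under that assumption the call
# above is expected to print the line shown in the comment below.
import re  # plain CPython; the test above also accepts MicroPython's ure

print(re.sub(r"(a)(b)?", r"\2-\1", "1a2"))  # -> 1-a2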
#!/usr/bin/python3 # -*- coding: utf-8 -*- '''Pychemqt, Chemical Engineering Process simulator Copyright (C) 2009-2017, Juan José Gómez Romera <jjgomera@gmail.com> This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PU
RPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>.''' ############################################################################### # Tools to create a python shell with pychemqt libraries imported # For now only work in linux with xterm as terminal ############################################################################### import atexit from PyQt5 import QtCore, QtWidgets from tools.firstrun import which class XTerm(QtCore.QProcess): """Gui container for terminal widget""" def __init__(self, config, parent=None): super(XTerm, self).__init__(parent) self.config = config atexit.register(self.kill) self.show_term() def sizeHint(self): size = QtCore.QSize(400, 300) return size.expandedTo(QtWidgets.QApplication.globalStrut()) def show_term(self): term = self.config.get("Applications", 'Shell') args = [ "-bg", self.config.get("Applications", "backgroundColor"), "-fg", self.config.get("Applications", "foregroundColor"), # blink cursor "-bc", # title "-T", QtWidgets.QApplication.translate( "pychemqt", "pychemqt python console")] if self.config.getboolean("Applications", "maximized"): args.append("-maximized") if self.config.getboolean("Applications", 'ipython') and \ which("ipython"): args.append("ipython3") else: args.append("python3") self.start(term, args) if self.error() == QtCore.QProcess.FailedToStart: print("xterm not installed") if __name__ == "__main__": import sys from configparser import ConfigParser import os app = QtWidgets.QApplication(sys.argv) conf_dir = os.path.expanduser('~') + "/.pychemqt/" pychemqt_dir = os.environ["PWD"] + "/" preferences = ConfigParser() preferences.read(conf_dir+"pychemqtrc") terminal = XTerm(preferences) app.exec_()
'Meta': {'object_name': 'Group'}, 'address': ('django.db.models.fields.TextField', [], {'blank': 'True'}), 'city': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}), 'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '20', 'blank': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'aldryn_people.grouptranslation': { 'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'GroupTranslation', 'db_table': "u'aldryn_people_group_translation'"}, 'description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Group']"}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '255'}) }, u'aldryn_people.peopleplugin': { 'Meta': {'object_name': 'PeoplePlugin'}, u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'group_by_group': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'people': ('aldryn_common.admin_fields.sortedm2m.SortedM2MModelField', [], {'symmetrical': 'False', 'to': u"orm['aldryn_people.Person']", 'null': 'True', 'blank': 'True'}), 'show_links': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
, 'show_vcard': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'style': ('django.db.models.fields.CharField', [], {'default': "u'standard'", 'max_length': '50'}) }, u'aldryn_people.person': { 'Meta': {'object_name': 'Person'}, 'email': ('django.db.models.fields.EmailField', [], {'default': "u''", 'max_length': '75', 'blank': 'True'}), 'fax': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'groups': ('sortedm2m.fields.SortedManyToManyField', [], {'default': 'None', 'related_name': "u'people'", 'blank': 'True', 'symmetrical': 'False', 'to': u"orm['aldryn_people.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'mobile': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'phone': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}), 'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "u'persons'", 'unique': 'True', 'null': 'True', 'to': u"orm['auth.User']"}), 'vcard_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'visual': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': "orm['filer.Image']", 'null': 'True', 'on_delete': 'models.SET_NULL', 'blank': 'True'}), 'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}) }, u'aldryn_people.persontranslation': { 'Meta': {'unique_together': "[(u'language_code', u'master')]", 'object_name': 'PersonTranslation', 'db_table': "u'aldryn_people_person_translation'"}, 'description': ('djangocms_text_ckeditor.fields.HTMLField', [], {'default': "u''", 'blank': 'True'}), 'function': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'language_code': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}), u'master': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'translations'", 'null': 'True', 'to': u"orm['aldryn_people.Person']"}), 'name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255'}), 'slug': ('django.db.models.fields.SlugField', [], {'default': "u''", 'max_length': '255'}) }, u'auth.group': { 'Meta': {'object_name': 'Group'}, u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}), 'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}) }, u'auth.permission': { 'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'}, 'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}), 'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'name': ('django.db.models.fields.CharField', [], {'max_length': '50'}) }, u'auth.user': { 'Meta': {'object_name': 'User'}, 'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}), 
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}), 'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}), 'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}), 'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}), 'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}), 'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}), 'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'}) }, 'cms.cmsplugin': { 'Meta': {'object_name': 'CMSPlugin'}, 'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}), 'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}), 'depth': ('django.db.models.fields.PositiveIntegerField', [], {}), u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
""" telemetry full tests. """ import platform import sys from unittest import mock import pytest import wandb def test_telemetry_finish(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): run = wandb.init() run.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry assert telemetry and 2 in telemetry.get("3", []) def test_telemetry_imports_hf(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): run = wandb.init() with mock.patch.dict("sys.modules", {"transformers": mock.Mock()}): import transformers run.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry # hf in finish modules but not in init modules assert telemetry and 11 not in telemetry.get("1", []) assert telemetry and 11 in telemetry.get("2", []) def test_telemetry_imports_catboost(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): wi
th mock.patch.dict("sys.modules", {"catboost": mock.Mock()}): import catboost run = wandb.init() run.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry
# catboost in both init and finish modules assert telemetry and 7 in telemetry.get("1", []) assert telemetry and 7 in telemetry.get("2", []) @pytest.mark.skipif( platform.system() == "Windows", reason="test suite does not build jaxlib on windows" ) @pytest.mark.skipif(sys.version_info >= (3, 10), reason="jax has no py3.10 wheel") def test_telemetry_imports_jax(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): import jax wandb.init() wandb.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry # jax in finish modules but not in init modules assert telemetry and 12 in telemetry.get("1", []) assert telemetry and 12 in telemetry.get("2", []) def test_telemetry_run_organizing_init(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): wandb.init(name="test_name", tags=["my-tag"], config={"abc": 123}, id="mynewid") wandb.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry assert telemetry and 13 in telemetry.get("3", []) # name assert telemetry and 14 in telemetry.get("3", []) # id assert telemetry and 15 in telemetry.get("3", []) # tags assert telemetry and 16 in telemetry.get("3", []) # config def test_telemetry_run_organizing_set(runner, live_mock_server, parse_ctx): with runner.isolated_filesystem(): run = wandb.init() run.name = "test-name" run.tags = ["tag1"] wandb.config.update = True run.finish() ctx_util = parse_ctx(live_mock_server.get_ctx()) telemetry = ctx_util.telemetry assert telemetry and 17 in telemetry.get("3", []) # name assert telemetry and 18 in telemetry.get("3", []) # tags assert telemetry and 19 in telemetry.get("3", []) # config update
''' Crunchyroll urlresolver plugin Copyright (C) 2013 voinage This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received
a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. ''' from t0mm0.common.net import Net from urlresolver.plugnplay.interfaces import UrlResolver from urlresolver.plugnplay.interfaces imp
ort PluginSettings from urlresolver.plugnplay import Plugin import re import urllib2 from urlresolver import common import os class CrunchyRollResolver(Plugin, UrlResolver, PluginSettings): implements = [UrlResolver, PluginSettings] name = "crunchyroll" domains = [ "crunchyroll.com" ] def __init__(self): p = self.get_setting('priority') or 100 self.priority = int(p) self.net = Net() #http://www.crunchyroll.co.uk/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286 #http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286 def get_media_url(self, host, media_id): web_url = self.get_url(host, media_id) html=self.net.http_GET('http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s'%media_id,{'Host':'www.crunchyroll.com', 'X-Device-Uniqueidentifier':'ffffffff-931d-1f73-ffff-ffffaf02fc5f', 'X-Device-Manufacturer':'HTC', 'X-Device-Model':'HTC Desire', 'X-Application-Name':'com.crunchyroll.crunchyroid', 'X-Device-Product':'htc_bravo', 'X-Device-Is-GoogleTV':'0'}).content mp4=re.compile(r'"video_url":"(.+?)","h"').findall(html.replace('\\',''))[0] return mp4 def get_url(self, host, media_id): return 'http://www.crunchyroll.com/android_rpc/?req=RpcApiAndroid_GetVideoWithAcl&media_id=%s' % media_id def get_host_and_id(self, url): r = re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url) if r: return r.groups() else: return False def valid_url(self, url, host): if self.get_setting('enabled') == 'false': return False return (re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url) or 'crunchyroll' in host)
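# An illustrative check of the media-id extraction used by get_host_and_id() above,
# run against the example URL from the comments in __init__(); nothing here is part
# of the resolver itself.
import re

url = 'http://www.crunchyroll.com/07-ghost/episode-2-nostalgic-memories-accompany-pain-573286'
match = re.match(r'http://www.(crunchyroll).+?/.+?/.+?([^a-zA-Z-+]{6})', url)
print(match.groups())  # expected: ('crunchyroll', '573286')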
from django.apps import apps
from contextlib import contextmanager


def session():
    return apps.get_app_config('basex').basex


@contextmanager
def recipe_db():
    s = session()
    s.execute('open recipe')
    try:
        yield s
    finally:
        s.close()
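# A usage sketch for the context manager above. It assumes the BaseX session object
# held on the app config supports execute() for commands, as the 'open recipe' call
# suggests; the 'info db' command is purely illustrative.
with recipe_db() as db:
    print(db.execute('info db'))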
#! /usr/bin/env python """ Sample script that illustrates exclusive card connection decorators. __author__ = "http://www.gemalto.com" Copyright 2001-2010 gemalto Author: Jean-Daniel Aussel, mailto:jean-daniel.aussel@gemalto.com This file is part of pyscard. pyscard is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your
option) any later version. pyscard is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License a
long with pyscard; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA """ from smartcard.CardType import AnyCardType from smartcard.CardRequest import CardRequest from smartcard.CardConnectionObserver import ConsoleCardConnectionObserver from smartcard.CardConnection import CardConnection from smartcard.util import toHexString from smartcard.ExclusiveConnectCardConnection import ExclusiveConnectCardConnection from smartcard.ExclusiveTransmitCardConnection import ExclusiveTransmitCardConnection # define the apdus used in this script GET_RESPONSE = [0XA0, 0XC0, 00, 00] SELECT = [0xA0, 0xA4, 0x00, 0x00, 0x02] DF_TELECOM = [0x7F, 0x10] # request any card type cardtype = AnyCardType() cardrequest = CardRequest(timeout=5, cardType=cardtype) cardservice = cardrequest.waitforcard() # attach the console tracer observer = ConsoleCardConnectionObserver() cardservice.connection.addObserver(observer) # attach our decorator cardservice.connection = ExclusiveTransmitCardConnection(ExclusiveConnectCardConnection(cardservice.connection)) # connect to the card and perform a few transmits cardservice.connection.connect() print 'ATR', toHexString(cardservice.connection.getATR()) try: cardservice.connection.lock() apdu = SELECT + DF_TELECOM response, sw1, sw2 = cardservice.connection.transmit(apdu) if sw1 == 0x9F: apdu = GET_RESPONSE + [sw2] response, sw1, sw2 = cardservice.connection.transmit(apdu) finally: cardservice.connection.unlock() import sys if 'win32' == sys.platform: print 'press Enter to continue' sys.stdin.read(1)
import os from nose.tools import (assert_equal, assert_
true) from ckantoolkit import config import ckan.tests.helpers as helpers import ckan.tests.factories as factories import ckanapi import boto from moto import mock_s3 import logging log = logging.getLogger(__name__) class TestS3ControllerResourceDownload(helpers.FunctionalTestBase):
def _upload_resource(self): factories.Sysadmin(apikey="my-test-key") app = self._get_test_app() demo = ckanapi.TestAppCKAN(app, apikey='my-test-key') factories.Dataset(name="my-dataset") file_path = os.path.join(os.path.dirname(__file__), 'data.csv') resource = demo.action.resource_create(package_id='my-dataset', upload=open(file_path), url='file.txt') return resource, demo, app @mock_s3 @helpers.change_config('ckan.site_url', 'http://mytest.ckan.net') def test_resource_show_url(self): '''The resource_show url is expected for uploaded resource file.''' resource, demo, _ = self._upload_resource() # does resource_show have the expected resource file url? resource_show = demo.action.resource_show(id=resource['id']) expected_url = 'http://mytest.ckan.net/dataset/{0}/resource/{1}/download/data.csv' \ .format(resource['package_id'], resource['id']) assert_equal(resource_show['url'], expected_url) @mock_s3 def test_resource_download_s3(self): '''A resource uploaded to S3 can be downloaded.''' resource, demo, app = self._upload_resource() resource_show = demo.action.resource_show(id=resource['id']) resource_file_url = resource_show['url'] file_response = app.get(resource_file_url) assert_equal(file_response.content_type, 'text/csv') assert_true('date,price' in file_response.body) @mock_s3 def test_resource_download_s3_no_filename(self): '''A resource uploaded to S3 can be downloaded when no filename in url.''' resource, demo, app = self._upload_resource() resource_file_url = '/dataset/{0}/resource/{1}/download' \ .format(resource['package_id'], resource['id']) file_response = app.get(resource_file_url) assert_equal(file_response.content_type, 'text/csv') assert_true('date,price' in file_response.body) @mock_s3 def test_resource_download_url_link(self): '''A resource with a url (not file) is redirected correctly.''' factories.Sysadmin(apikey="my-test-key") app = self._get_test_app() demo = ckanapi.TestAppCKAN(app, apikey='my-test-key') dataset = factories.Dataset() resource = demo.action.resource_create(package_id=dataset['id'], url='http://example') resource_show = demo.action.resource_show(id=resource['id']) resource_file_url = '/dataset/{0}/resource/{1}/download' \ .format(resource['package_id'], resource['id']) assert_equal(resource_show['url'], 'http://example') conn = boto.connect_s3() bucket = conn.get_bucket('my-bucket') assert_equal(bucket.get_all_keys(), []) # attempt redirect to linked url r = app.get(resource_file_url, status=[302, 301]) assert_equal(r.location, 'http://example')
de_formatter.extra import LinebreakingAttributeFormatter >>> formatters = dict(base.formatters, ... **{Call: LinebreakingAttributeFormatter.call_formatter_factory(base.formatters[ast.Call]), ... Attribute: LinebreakingAttributeFormatter, ... Subscript: LinebreakingAttributeFormatter.subscription_formatter_factory(base.formatters[ast.Subscript])}) >>> print format_code('instance.identifier.identifier()', ... formatters_register=formatters, width=3, force=True) (instance.identifier .identifier()) """ class AttrsRefsListFormatter(base.ListOfExpressionsFormatter): separator = '.' class _IdentifierFormatter(base.CodeFormatter): def __init__(self, identifier, formatters_register, parent): self.identifier = identifier self.parent = parent super(LinebreakingAttributeFormatter._IdentifierFormatter, self).__init__(formatters_register) def _format_code(self, width, continuation, suffix): block = CodeBlock.from_tokens(self.identifier) if suffix is not None: block.merge(suffix) return block @classmethod def call_formatter_factory(cls, CallFormatter): class RedirectingCallFormatter(CallFormatter): def __new__(cls, expr, formatters_register, parent=None, func_formatter=None): # if func_formatter is not provided check whether we are not part of method call if func_formatter is None and isinstance(expr.func, ast.Attribute): return LinebreakingAttributeFormatter(expr, formatters_register, parent) return super(RedirectingCallFormatter, cls).__new__(cls, expr=expr, formatters_register=formatters_register, parent=parent, func_formatter=func_formatter) def __init__(self, expr, formatters_register, parent=None, func_formatter=None): super(RedirectingCallFormatter, self).__init__(expr, formatters_register, parent) if func_formatter: self._func_formatter = func_formatter return RedirectingCallFormatter @classmethod def subscription_formatter_factory(cls, SubscriptionFormatter): class RedirectingSubsriptionFormatter(SubscriptionFormatter): def __new__(cls, expr, formatters_register, par
ent=None, value_formatter=None): # if value_formatt
er is not provided check wether we are not part of attribute ref if value_formatter is None and isinstance(expr.value, ast.Attribute): return LinebreakingAttributeFormatter(expr, formatters_register, parent) return super(RedirectingSubsriptionFormatter, cls).__new__(cls, expr=expr, formatters_register=formatters_register, parent=parent, value_formatter=value_formatter) def __init__(self, expr, formatters_register, parent=None, value_formatter=None): super(RedirectingSubsriptionFormatter, self).__init__(expr, formatters_register, parent) if value_formatter: self._value_formatter = value_formatter return RedirectingSubsriptionFormatter @classmethod def register(cls, formatters_register): formatters_register[ast.Attribute] = cls formatters_register[ast.Subscript] = cls.subscription_formatter_factory(formatters_register[ast.Subscript]) formatters_register[ast.Call] = cls.call_formatter_factory(formatters_register[ast.Call]) return formatters_register def __init__(self, *args, **kwargs): super(base.AttributeFormatter, self).__init__(*args, **kwargs) self._attrs_formatters = [] expr = self.expr while (isinstance(expr, ast.Attribute) or isinstance(expr, ast.Call) and isinstance(expr.func, ast.Attribute) or isinstance(expr, ast.Subscript) and isinstance(expr.value, ast.Attribute)): if isinstance(expr, ast.Attribute): self._attrs_formatters.insert(0, LinebreakingAttributeFormatter._IdentifierFormatter(expr.attr, self.formatters_register, parent=self)) expr = expr.value elif isinstance(expr, ast.Call): # FIXME: how to fix parent?? should we change type of parent to ast type? func_formatter = LinebreakingAttributeFormatter._IdentifierFormatter( (expr.func .attr), self.formatters_register, parent=self) CallFormatter = self.get_formatter_class(expr) call_formater = CallFormatter(func_formatter=func_formatter, expr=expr, formatters_register=self.formatters_register, parent=self) self._attrs_formatters.insert(0, call_formater) expr = expr.func.value elif isinstance(expr, ast.Subscript): # FIXME: how to fix parent?? should we change type of parent to ast type? value_formatter = LinebreakingAttributeFormatter._IdentifierFormatter( (expr.value.attr), self.formatters_register, parent=self) SubscriptionFormatter = self.get_formatter_class(expr) subscription_formatter = SubscriptionFormatter(value_formatter=value_formatter, expr=expr, formatters_register=self.formatters_register, parent=self) self._attrs_formatters.insert(0, subscription_formatter) expr = expr.value.value self.value_formatter = self.get_formatter(expr) def _format_code(self, width, continuation, suffix): def _format(continuation, prefix=None): block = CodeBlock.from_tokens(prefix) if prefix else CodeBlock() for i in range(0, width - block.width + 1): block.merge(self.value_formatter.format_code(width - block.width - i)) separator = CodeBlock.from_tokens('.') attr_ref_indent = block.width block.merge(separator.copy()) try: block.merge(self._attrs_formatters[0] .format_code(width - block.last_line.width, False, suffix=(suffix if len(self._attrs_formatters) == 1 else None))) for attr_formatter in self._attrs_formatters[1:]: s = suffix if self._attrs_formatters[-1] == attr_formatter else None try: attr_block = attr_formatter.format_code(width - block.last_line.width - separator.width, False, suffix=s) except NotEnoughSpace:
timedelta(minutes=30 * i) max_events = max(max_events, len(schedule[half_hour][id])) max_simul[id] = max_events for half_hour in schedule: for location in schedule[half_hour]: for event in schedule[half_hour][location]: if isinstance(event, Event): simul = max(len(schedule[half_hour][event.location]) for half_hour in event.half_hours) event.colspan = 1 if simul > 1 else max_simul[event.location] for i in range(1, event.duration): schedule[half_hour + timedelta(minutes=30*i)][event.location].remove(c.EVENT_BOOKED) schedule[half_hour + timedelta(minutes=30*i)][event.location].append(event.colspan) for half_hour in schedule: for id, name in c.EVENT_LOCATION_OPTS: span_sum = sum(getattr(e, 'colspan', e) for e in schedule[half_hour][id]) for i in range(max_simul[id] - span_sum): schedule[half_hour][id].append(c.EVENT_OPEN) schedule[half_hour] = sorted(schedule[half_hour].items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0])) max_simul = [(id, c.EVENT_LOCATIONS[id], colspan) for id, colspan in max_simul.items()] return { 'message': message, 'schedule': sorted(schedule.items()), 'max_simul': sorted(max_simul, key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[0])) } @unrestricted @csv_file def time_ordered(self, out, session): for event in session.query(Event).order_by('start_time', 'duration', 'location').all(): out.writerow([event.timespan(30), event.name, event.location_label]) @unrestricted def xml(self, session): cherrypy.response.headers['Content-type'] = 'text/xml' schedule = defaultdict(list) for event in session.query(Event).order_by('start_time').all(): schedule[event.location_label].append(event) return render('schedule/schedule.xml', { 'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0].location)) }) @unrestricted def schedule_tsv(self, session): cherrypy.response.headers['Content-Type'] = 'text/tsv' cherrypy.response.headers['Content-Disposition'] = 'attachment;filename=Schedule-{}.tsv'.format(int(localized_now().timestamp())) schedule = defaultdict(list) for event in session.query(Event).order_by('start_time').all(): schedule[event.location_label].append(dict(event.to_dict(), **{ 'date': event.start_time_local.strftime('%m/%d/%Y'), 'start_time': event.start_time_local.strftime('%I:%M:%S %p'), 'end_time': (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'), 'description': normalize_newlines(event.description).replace('\n', ' ') })) return render('schedule/schedule.tsv', { 'schedule': sorted(schedule.items(), key=lambda tup: c.ORDERED_EVENT_LOCS.index(tup[1][0]['location'])) }) @csv_file def csv(self, out, session): out.writerow(['Session Title', 'Date', 'Time Start', 'Time End', 'Room/Location', 'Schedule Track (Optional)', 'Description (Optional)', 'Allow Checkin (Optional)', 'Checkin Begin (Optional)', 'Limit Spaces? 
(Optional)', 'Allow Waitlist (Optional)']) rows = [] for event in session.query(Event).order_by('start_time').all(): rows.append([ event.name, event.start_time_local.strftime('%m/%d/%Y'), event.start_time_local.strftime('%I:%M:%S %p'), (event.start_time_local + timedelta(minutes=event.minutes)).strftime('%I:%M:%S %p'), event.location_label, '', normalize_newlines(event.description).replace('\n', ' '), '', '', '', '' ]) for r in sorted(rows, key=lambda tup: tup[4]): out.writerow(r) @csv_file def panels(self, out, session): out.writerow(['Panel', 'Time', 'Duration', 'Room', 'Description', 'Panelists']) for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label]): if 'Panel' in event.location_label or 'Autograph' in event.location_label: out.writerow([event.name, event.start_time_local.strftime('%I%p %a').lstrip('0'), '{} minutes'.format(event.minutes), event.location_label, event.description, ' / '.join(ap.attendee.full_name for ap in sorted(event.assigned_panelists, key=lambda ap: ap.attendee.full_name))]) @unrestricted def panels_json(self, session): cherrypy.response.headers['Content-Type'] = 'application/json' return json.dumps([ { 'name': event.name, 'location': event.location_label, 'start': event.start_time_local.strftime('%I%p %a').lstrip('0'), 'end': event.end_time_local.strftime('%I%p %a').lstrip('0'), 'start_unix': int(mktime(event.start_time.utctimetuple())), 'end_unix': int(mktime(event.end_time.utctimetuple())), 'duration': event.minutes, 'description': event.description, 'panelists': [panelist.attendee.full_name for panelist in event.assigned_panelists] } for event in sorted(session.query(Event).all(), key=lambda e: [e.start_time, e.location_label]) ], indent=4).encode('utf-8') @unrestricted def now(self, session, when=None): if when: now = c.EVENT_TIMEZONE.localize(datetime(*map(int, when.split(',')))) else: now = c.EVENT_TIMEZONE.localize(datetime.combine(localized_now().date(), time(localized_now().hour))) current, upcoming = [], [] for loc, desc in c.EVENT_LOCATION_OPTS: approx = session.query(Event).filter(Event.location == loc, Event.start_time >= now - timedelta(hours=6), Event.start_time <= now).all() for event in approx: if now in event.half_hours: current.append(event) next = session.query(Event) \ .filter(Event.location == loc, Event.start_time >= now + timedelta(minutes=30), Event.start_time <= now + timedelta(hours=4)) \ .order_by('start_time').all() if next: upcoming.extend(event for event in next if event.start_time == next[0].start_time) return { 'now': now if when else localized_now(), 'current': current, 'upcoming': upcoming } def form(self, session, message='', panelists=(), **params): event = session.event(params, allowed=['location', 'start_time']) if 'name' in params: session.add(event) # Associate a panel app with this event, and if the event is new, use the panel app's name and title if 'panel_id' in params and params['panel_id']: add_panel = session.panel_application(id=params['panel_id']) add_panel.event_id = event.id sess
ion.add(add_panel) if event.is_new: event.name = add_panel.name event.description = add_panel.description for pa in add_panel.applicants: if pa.attendee_id: assigned_panelist = AssignedPanelist(attendee_id=pa.attendee.id, event_id=event.id)
session.add(assigned_panelist)
rt boto3 import boto from boto.exception import EC2ResponseError import sure # noqa from moto import mock_ec2 SAMPLE_DOMAIN_NAME = u'example.com' SAMPLE_NAME_SERVERS = [u'10.0.0.6', u'10.0.0.7'] @mock_ec2 def test_vpcs(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") vpc.cidr_block.should.equal('10.0.0.0/16') all_vpcs = conn.get_all_vpcs() all_vpcs.should.have.length_of(1) vpc.delete() all_vpcs = conn.get_all_vpcs() all_vpcs.should.have.length_of(0) with assert_raises(EC2ResponseError) as cm: conn.delete_vpc("vpc-1234abcd") cm.exception.code.should.equal('InvalidVpcID.NotFound') cm.exception.status.should.equal(400) cm.exception.request_id.should_not.be.none @mock_ec2 def test_vpc_defaults(): conn = boto.connect_vpc('the_key', 'the_secret') vpc = conn.create_vpc("10.0.0.0/16") conn.get_all_vpcs().should.have.length_of(1) conn.get_all_route_tables().should.have.length_of(1) conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(1) vpc.delete() conn.get_all_vpcs().should.have.length_of(0) conn.get_all_route_tables().should.have.length_of(0) conn.get_all_security_groups(filters={'vpc-id': [vpc.id]}).should.have.length_of(0) @mock_ec2 def test_vpc_tagging(): conn = boto.connect_vpc() vpc = conn.create_vpc("10.0.0.0/16") vpc.add_tag("a key", "some value") tag = conn.get_all_tags()[0] tag.name.should.equal("a key") tag.value.should.equal("some value") # Refresh the vpc vpc = conn.get_all_vpcs()[0] vpc.tags.should.have.length_of(1) vpc.tags["a key"].should.equal("some value") @mock_ec2 def test_vpc_get_by_id(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/16") vpcs = conn.get_all_vpcs(vpc_ids=[vpc1.id, vpc2.id]) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_cidr_block(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/24") vpcs = conn.get_all_vpcs(filters={'cidr': '10.0.0.0/16'}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_dhcp_options_id(): conn = boto.connect_vpc() dhcp_options = conn.create_dhcp_options(SAMPLE_DOMAIN_NAME, SAMPLE_NAME_SERVERS) vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/24") conn.associate_dhcp_options(dhcp_
options.id, vpc1.id) conn.associate_dhcp_options(dhcp_options.id, vpc2.id) vpcs = conn.get_all_vpcs(filters={'dhcp-options-id': dhcp_options.id}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids)
@mock_ec2 def test_vpc_get_by_tag(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") vpc3 = conn.create_vpc("10.0.0.0/24") vpc1.add_tag('Name', 'TestVPC') vpc2.add_tag('Name', 'TestVPC') vpc3.add_tag('Name', 'TestVPC2') vpcs = conn.get_all_vpcs(filters={'tag:Name': 'TestVPC'}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_tag_key_superset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") vpc3 = conn.create_vpc("10.0.0.0/24") vpc1.add_tag('Name', 'TestVPC') vpc1.add_tag('Key', 'TestVPC2') vpc2.add_tag('Name', 'TestVPC') vpc2.add_tag('Key', 'TestVPC2') vpc3.add_tag('Key', 'TestVPC2') vpcs = conn.get_all_vpcs(filters={'tag-key': 'Name'}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_tag_key_subset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") vpc3 = conn.create_vpc("10.0.0.0/24") vpc1.add_tag('Name', 'TestVPC') vpc1.add_tag('Key', 'TestVPC2') vpc2.add_tag('Name', 'TestVPC') vpc2.add_tag('Key', 'TestVPC2') vpc3.add_tag('Test', 'TestVPC2') vpcs = conn.get_all_vpcs(filters={'tag-key': ['Name', 'Key']}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_tag_value_superset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") vpc3 = conn.create_vpc("10.0.0.0/24") vpc1.add_tag('Name', 'TestVPC') vpc1.add_tag('Key', 'TestVPC2') vpc2.add_tag('Name', 'TestVPC') vpc2.add_tag('Key', 'TestVPC2') vpc3.add_tag('Key', 'TestVPC2') vpcs = conn.get_all_vpcs(filters={'tag-value': 'TestVPC'}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_vpc_get_by_tag_value_subset(): conn = boto.connect_vpc() vpc1 = conn.create_vpc("10.0.0.0/16") vpc2 = conn.create_vpc("10.0.0.0/16") conn.create_vpc("10.0.0.0/24") vpc1.add_tag('Name', 'TestVPC') vpc1.add_tag('Key', 'TestVPC2') vpc2.add_tag('Name', 'TestVPC') vpc2.add_tag('Key', 'TestVPC2') vpcs = conn.get_all_vpcs(filters={'tag-value': ['TestVPC', 'TestVPC2']}) vpcs.should.have.length_of(2) vpc_ids = tuple(map(lambda v: v.id, vpcs)) vpc1.id.should.be.within(vpc_ids) vpc2.id.should.be.within(vpc_ids) @mock_ec2 def test_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the default VPC default_vpc = ec2.create_vpc(CidrBlock='172.31.0.0/16') default_vpc.reload() default_vpc.is_default.should.be.ok # Test default values for VPC attributes response = default_vpc.describe_attribute(Attribute='enableDnsSupport') attr = response.get('EnableDnsSupport') attr.get('Value').should.be.ok response = default_vpc.describe_attribute(Attribute='enableDnsHostnames') attr = response.get('EnableDnsHostnames') attr.get('Value').should.be.ok @mock_ec2 def test_non_default_vpc(): ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the default VPC ec2.create_vpc(CidrBlock='172.31.0.0/16') # Create the non default VPC vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') vpc.reload() vpc.is_default.shouldnt.be.ok # Test default values for VPC attributes response = 
vpc.describe_attribute(Attribute='enableDnsSupport') attr = response.get('EnableDnsSupport') attr.get('Value').should.be.ok response = vpc.describe_attribute(Attribute='enableDnsHostnames') attr = response.get('EnableDnsHostnames') attr.get('Value').shouldnt.be.ok @mock_ec2 def test_vpc_modify_enable_dns_support(): ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the default VPC ec2.create_vpc(CidrBlock='172.31.0.0/16') vpc = ec2.create_vpc(CidrBlock='10.0.0.0/16') # Test default values for VPC attributes response = vpc.describe_attribute(Attribute='enableDnsSupport') attr = response.get('EnableDnsSupport') attr.get('Value').should.be.ok vpc.modify_attribute(EnableDnsSupport={'Value': False}) response = vpc.describe_attribute(Attribute='enableDnsSupport') attr = response.get('EnableDnsSupport') attr.get('Value').shouldnt.be.ok @mock_ec2 def test_vpc_modify_enable_dns_hostnames(): ec2 = boto3.resource('ec2', region_name='us-west-1') # Create the default VPC ec2.create_vpc(CidrBlock='17
import datetime import logging import textwrap import time import click import hatarake import hatarake.net as requests from hatarake.config import Config logger = logging.getLogger(__name__) @click.group() @click.option('-v', '--verbosity', count=True) def main(verbosity): logging.basicConfig(level=logging.WARNING - verbosity * 10) logging.getLogger('gntp').setLevel(logging.ERROR - verbosity * 10) @main.command() @click.option('--start', help='start time') @click.argument('duration', type=int) @click.argument('title') def submit(start, duration, title): '''Submit a pomodoro to the server''' config = Config(hatarake.CONFIG_PATH) api = config.get('server', 'api') token = config.get('server', 'token') response = requests.post( api, headers={ 'Authorization': 'Token %s' % token, }, data={ 'created': start, 'duration': duration, 'title': title, } ) response.raise_for_status() click.echo(response.text) @main.command() @click.option('--duration', type=int, default=2) @click.option('--api_server', envvar='HATARAKE_API_SERVER') @click.option('--api_token', envvar='HATARAKE_API_TOKEN') @click.argument('title') def append(duration, title, api_server=None, api_token=None): '''Append time to a pomodoro''' config = Config(hatarake.CONFIG_PATH) api = api_server if api_server else config.get('server', 'api') token = api_token if api_token else config.get('server', 'token') end = datetime.datetime.utcnow().replace(microsecond=0) start = end - datetime.timedelta(minutes=duration) # Split the tags out of the title # For now, we remove the tags
from the final title to make things neater # but in the future, may want to leave the hash tag in the full title tags = {tag.strip("#") for tag in title.split() if tag.startswith("#")} title = ' '.join({tag for tag in title.split() if not tag
.startswith('#')}) response = requests.post( api + '/append', headers={ 'Authorization': 'Token %s' % token, }, data={ 'start': start.isoformat(), 'end': end.isoformat(), 'category': tags, 'title': title, } ) response.raise_for_status() click.echo(response.text) @main.command() @click.option('--api_server', envvar='HATARAKE_API_SERVER') @click.option('--api_token', envvar='HATARAKE_API_TOKEN') @click.argument('label') @click.argument('duration', type=int) def countdown(api_server, api_token, label, duration): '''Submit a new countdown''' config = Config(hatarake.CONFIG_PATH) api = api_server if api_server else config.get('countdown', 'api') token = api_token if api_token else config.get('countdown', 'token') created = datetime.datetime.now() + datetime.timedelta(minutes=duration) response = requests.put( api, headers={ 'Authorization': 'Token %s' % token, }, data={ 'created': created.replace(microsecond=0).isoformat(), 'label': label, } ) response.raise_for_status() click.echo(response.text) @main.command() @click.argument('key') @click.argument('value') def stat(key, value): '''Submit stat data to server''' config = Config(hatarake.CONFIG_PATH) response = requests.post( config.get('stat', 'api'), headers={ 'Authorization': 'Token %s' % config.get('stat', 'token'), }, data={ 'key': key, 'value': value, } ) logger.info('POSTing to %s %s', response.request.url, response.request.body) response.raise_for_status() click.echo(response.text) @main.command() @click.argument('name', default='heartbeat') def heartbeat(name): config = Config(hatarake.CONFIG_PATH) url = config.get('prometheus', 'pushgateway') payload = textwrap.dedent(''' # TYPE {name} gauge # HELP {name} Last heartbeat based on unixtimestamp {name} {time} ''').format(name=name, time=int(time.time())).lstrip() response = requests.post(url, data=payload) response.raise_for_status() click.echo(response.text)
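# Quick illustration of the tag-splitting step used in `append` above
# (a minimal sketch with a made-up title; not part of the CLI itself):
#
#   title = "write report #work #deep"
#   tags = {tag.strip("#") for tag in title.split() if tag.startswith("#")}
#   rest = ' '.join(tag for tag in title.split() if not tag.startswith('#'))
#   # tags == {'work', 'deep'}; rest == 'write report'
#   # (joining a generator keeps word order; a set does not guarantee it)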
# (C) Datadog, Inc. 2010-2016 # All rights reserved # Licensed under Simplified BSD License (see LICENSE) # 3p import requests # project from checks import AgentCheck from util import headers class PHPFPMCheck(AgentCheck): """ Tracks basic php-fpm metrics via the status module Requires php-fpm pools to have the status option. See http://www.php.net/manual/de/install.fpm.configuration.php#pm.status-path for more details """ SERVICE_CHECK_NAME = 'php_fpm.can_ping' GAUGES = { 'listen queue': 'php_fpm.listen_queue.size', 'idle processes': 'php_fpm.processes.idle', 'active processes': 'php_fpm.processes.active', 'total processes': 'php_fpm.processes.total', } MONOTONIC_COUNTS = { 'accepted conn': 'php_fpm.requests.accepted', 'max children reached': 'php_fpm.processes.max_reached', 'slow requests': 'php_fpm.requests.slow', } def check(self, instance): status_url = instance.get('status_url') ping_url = instance.get('ping_url') ping_reply = instance.get('ping_reply') auth = None user = instance.get('user') password = instance.get('password') tags = instance.get('tags', []) http_host = instance.get('http_host') if user and password: auth = (user, password) if status_url is None and ping_url is None: raise Exception("No status_url or ping_url specified for this instance") pool = None status_exception = None if status_url is not None: try: pool = self._process_status(status_url, auth, tags, http_host) except Exception as e: status_exception = e pass if ping_url is not None: self._process_ping(ping_url, ping_reply, auth, tags, pool, http_host) # pylint doesn't understand that we are raising this only if it's here if status_exception is not None: raise status_exception # pylint: disable=E0702 def _process_status(self, status_url, auth, tags, http_host): data = {} try: # TODO: adding the 'full' parameter gets you per-process detailed # informations, which could be nice to parse and output as metrics resp = requests.get(status_url, auth=auth, headers=headers(self.agentConfig, http_host=http_host), params={'json': True}) resp.raise_for_status() data = resp.json() except Exception as e: self.log.error("Failed to get metrics from {0}.\nError {1}".format(status_url, e)) raise pool_name = data.get('pool', 'default') metric_tags = tags + ["pool:{0}".format(pool_name)] for key, mname in self.GAUGES.iteritems(): if key not in data: self.log.warn("Gauge metric {0} is missing from FPM status".format(key)) continue self.gauge(mname, int(data[key]), tags=metric_tags) for key, mname in self.MONOTONIC_COUNTS.iteritems(): if key not in data: self.log.warn("Counter metric {0} is missing from FPM status".format(key)) continue self.monotonic_count(mname, int(data[key]), tags=metric_tags) # return pool, to tag the service check with it if we have one return pool_name def _process_ping(self, ping_url, ping_reply, auth, tags, pool_name, http_host): if ping_reply is None: ping_reply = 'pong' sc_tags = ["ping_url:{0}".format(ping_url)] try: # TODO: adding the 'full' parameter gets you per-process detailed # informations, which could be nice to parse and output as metrics resp = requests.get(ping_url, auth=auth, headers=headers(self.agentConfig, http_host=http_host)) resp.raise_for_status() if ping_reply not in resp.text: raise Exception("Received unexpected reply to ping {0}".format(resp.text)) except Exception as e: self.log.error("Failed to ping FPM pool {0} on URL {1}." "\nError {2}".format(pool_name, ping_ur
l, e)) self.service_check(self.SERVICE_CHECK_NAME, AgentCheck.CRITICAL, tags=sc_tags, message=str(e)) else: self.service_check(self.SERV
ICE_CHECK_NAME, AgentCheck.OK, tags=sc_tags)
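# Example instance configuration this check reads (a sketch only; URLs, pool
# names and credentials are hypothetical, and the check is normally driven by
# the agent from its YAML config rather than instantiated by hand):
#
#   instance = {
#       'status_url': 'http://localhost/status',
#       'ping_url': 'http://localhost/ping',
#       'ping_reply': 'pong',
#       'user': 'admin',          # optional basic auth
#       'password': 'secret',
#       'tags': ['pool:web'],
#   }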
miss_penalty) def false_detection(self): self.marker(self.markerbase+4) self.rewardlogic.score_event(self.false_penalty) def correct(self): if self.focused: if ((self.cueobj is not None) and self.cueobj.iscued): self.marker(self.markerbase+5 if self.control else self.markerbase+6) else: self.marker(self.markerbase+7 if self.control else self.markerbase+8) if self.control == ((self.cueobj is not None) and self.cueobj.iscued): # the user correctly spots the warning event self.sound(self.snd_hit,**self.snd_params) self.rewardlogic.score_event(self.hit_reward) else: # the user spotted it, but didn't get the cue right self.sound(self.snd_wrongcue,**self.snd_params) self.rewardlogic.score_event(self.false_penalty) else: self.marker(self.markerbase+9) # the user spotted it, but was not tasked to do so... self.rewardlogic.score_event(self.false_penalty) def flash(self,status,duration=1): self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params) class CueLight(LatentModule): """ The yellow cue light (SYSMONV). """ def __init__(self, rewardlogic, focused = True, # whether this task is currently focused markerbase = 1, # markers markerbase..markerbase+6 are used event_interval=lambda: random.uniform(45,85), # interval between two successive events pic_off='light_off.png', # picture to display for the disabled light pic_on='light_on.png', # picture to display for the enabled light screen_offset=0, # offset to position this icon on one of the three screens pic_params={'pos':[0,0],'scale':0.15}, # parameters for the picture() command duration = 1.5, # duration for which the cue light stays on ): LatentModule.__init__(self) self.rewardlogic = rewardlogic self.focused = focused self.markerbase = markerbase self.event_interval = event_interval self.pic_off = pic_off self.pic_on = pic_on self.pic_params = pic_params self.screen_offset = screen_offset self.duration = duration self.pic_params = copy.deepcopy(pic_params) self.iscued = False def run(self): self.pic_params['pos'][0] += self.screen_offset # pre-cache the media files... self.precache_picture(self.pic_on) self.precache_picture(self.pic_off) while True: if not self.focused: self.iscued = False # show the "off" picture for the inter-event interval self.picture(self.pic_off, self.event_interval(), **self.pic_params) # show the "on" picture and cue the other items self.marker(self.markerbase+1) if self.focused: self.iscued = True self.picture(self.pic_on, self.duration, **self.pic_params) def flash(self,status,duration=1): self.picture(self.pic_on if status else self.pic_off,duration=duration, **self.pic_params) class WarningSound(LatentModule): """ The warning sounds (SYSMONA). 
""" def __init__(self, # general properties rewardlogic, # reward handling logic watcher = None, # response event watcher focused = True, # whether this task is currently focused markerbase = 1, # markers markerbase..markerbase+6 are used event_interval=lambda: random.uniform(45,85), # interval between two successive events # cueing control cueobj = None, # an object that might have .iscued set to true # audio parameters screen_offset=0, # offset to position this source on one of the three screens snd_on='xHyprBlip.wav', # sound to play in case of an event snd_params={'volume':0.25,'direction':0.0}, # parameters for the sound() command # response handling snd_hit='click2s.wav', # sound when the user correctly detected the warning state snd_wrongcue='xBuzz01.wav', # the sound that is overlaid with the buzzer when the response was wrong due to incorrect cueing response_key='sysmona-check', # key to press in case of an event timeout=5.5, # response timeout for the user hit_reward=0, # reward if hit miss_penalty=-20, # penalty if missed false_penalty=-5, # penalty for false positives # ticking support snd_tick_off=None, # optional ticking in off status snd_tick_on=None, # optional ticking in on status tick_rate = None, # tick rate (duration in non-tick status, duration in tick status) ): LatentModule.__init__(self) self.rewardlogic = rewardlogic self.focused = focused self.markerbase = markerbase self.event_interval = event_interval self.snd_on = snd_on self.snd_params = snd_params self.snd_wrongcue = snd_wrongcue self.snd_hit = snd_hit self.response_key = response_key self.timeout = timeout self.hit_reward = hit_reward self.miss_penalty = miss_penalty self.false_penalty = false_penalty self.screen_offset = screen_offset self.snd_params = copy.deepcopy(snd_params) self.cueobj = cueobj self.control = False self.snd_tick_off = snd_tick_off self.snd_tick_on = snd_tick_on self.tick_rate = tick_rate self.watcher = watcher def run(self): self.snd_params['direction'] += self.screen_offset # pre-cache the media files... self.precache_sound(self.snd_on) self.precache_sound(self.snd_tick_on) self.precache_sound(self.snd_tick_off) self.precache_sound(self.snd_wrongcue) self.precache_sound(self.snd_hit) self.accept('control',self.oncontrol,[True]) self.accept('control-up',self.oncontrol,[False]) # set up an event watcher (taking care of timeouts and inappropriate responses) if self.watcher is None: self.watcher = EventWatcher(eventtype=self.response_key,
handleduration=self.timeout,
defaulthandler=self.false_detection) while True: # off status if self.tick_rate is not None: t_end = time.time()+self.event_interval() while time.time() < t_end: self.marker(self.markerbase+10) # play the off/tic snd self.sound(self.snd_tick_off, **self.snd_params) self.sleep(self.tick_rate[1]) # wait self.sleep(self.tick_rate[0]) else: # wait self.sleep(self.event_interval())
import sqlite3 import time conn = sqlite3.connect('log.db') c = conn.cursor() # Create table c.execute("CREATE T
ABLE if not exists log (log_timestamp DECIMAL(12,8), " "log_source text, msg_sequence integer, log_message text, statu
s text)") for x in range(0, 1000): insertquery = "INSERT INTO log (log_timestamp, log_source, msg_sequence, log_message) " \ "VALUES ({0},'tst source', {1}, 'log message')".format(time.time(), x) c.execute(insertquery) conn.commit() conn.close()
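# Optional read-back sketch (assumes the inserts above have run and 'log.db'
# is in the working directory):
#
#   conn = sqlite3.connect('log.db')
#   for row in conn.execute("SELECT log_timestamp, msg_sequence, log_message FROM log LIMIT 5"):
#       print(row)
#   conn.close()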
# -*- coding: utf-8 -*- import functools import httplib as http import logging import time import bleach from django.db.models import Q from flask import request from framework.auth.decorators import collect_auth from framework.auth.decorators import must_be_logged_in from framework.exceptions import HTTPError from framework import sentry from website import language from osf.models import OSFUser, AbstractNode from website import settings from website.project.views.contributor import get_node_contributors_abbrev from website.ember_osf_web.decorators import ember_flag_is_active from website.search import exceptions import website.search.search as search from website.search.util import build_query logger = logging.getLogger(__name__) RESULTS_PER_PAGE = 250 def handle_search_errors(func): @functools.wraps(func) def wrapped(*args, **kwargs): try: return func(*args, **kwargs) except exceptions.MalformedQueryError: raise HTTPError(http.BAD_REQUEST, data={ 'message_short': 'Bad search query', 'message_long': language.SEARCH_QUERY_HELP, }) except exceptions.SearchUnavailableError: raise HTTPError(http.SERVICE_UNAVAILABLE, data={ 'message_short': 'Search unavailable', 'message_long': ('Our search service is currently unavailable, if the issue persists, ' + language.SUPPORT_LINK), }) except exceptions.SearchException: # Interim fix for issue where ES fails with 500 in some settings- ensure exception is still logged until it can be better debugged. See OSF-4538 sentry.log_exception() sentry.log_message('Elasticsearch returned an unexpected error response') # TODO: Add a test; may need to mock out the error response due to inability to reproduce error code locally raise HTTPError(http.BAD_REQUEST, data={ 'message_short': 'Could not perform search query', 'message_long': language.SEARCH_QUERY_HELP, }) return wrapped @handle_search_errors def search_search(**kwargs): _type = kwargs.get('type', None) tick = time.time() results = {} if request.method == 'POST': results = search.search(request.get_json(), doc_type=_type) elif request.method == 'GET': q = request.args.get('q', '*') # TODO Match javascript params? start = request.args.get('from', '0') size = request.args.get('size', '10') results = search.search(build_query(q, start, size), doc_type=_type) results['time'] = round(time.time() - tick, 2) return results @ember_flag_is_active('ember_search_page') def search_view(): return {'shareUrl': settings.SHARE_URL}, def conditionally_add_query_item(query, item, condition, value): """ Helper for the search_projects_by_title function which will add a condition to a query It will give an error if the proper search term is not used. :param query: The modular ODM query that you want to modify :param item: the field to query on :param condition: yes, no, or either :return: the modified query """ condition = condition.lower() if condition == 'yes': return query & Q(**{item: value}) elif condition == 'no': return query & ~Q(**{item: value}) elif condition == 'either': return query raise HTTPError(http.BAD_REQUEST) @must_be_logged_in def search_projects_by_title(**kwargs): """ Search for nodes by title. Can pass in arguments from the URL to modify the search :arg term: The substring of the title. :arg category: Category of the node. :arg isDeleted: yes, no, or either. Either will not add a qualifier for that argument in the search. :arg isFolder: yes, no, or either. Either will not add a qualifier for that argument in the search. :arg isRegistration: yes, no, or either. 
Either will not add a qualifier for that argument in the search. :arg includePublic: yes or no. Whether the projects listed should include public projects. :arg includeContributed: yes or no. Whether the search should include projects the current user has contributed to. :arg ignoreNode: a list of nodes that should not be included in the search. :return: a list of dictionaries of projects """ # TODO(fabianvf): At some point, it would be nice to do this with elastic search user = kwargs['auth'].user term = request.args.get('term', '') max_results = int(request.args.get('maxResults', '10')) category = request.args.get('category', 'project').lower() is_deleted = request.args.get('isDeleted', 'no').lower() is_collection = request.args.get('isFolder', 'no').lower() is_registration = request.args.get('isRegistration', 'no').lower() include_public = request.args.get('includePublic', 'yes').lower() include_contributed = request.args.get('includeContributed', 'yes').lower() ignore_nodes = request.args.getlist('ignoreNode', []) matching_title = Q( title__icontains=term, # search term (case insensitive) category=category # is a project ) matching_title = conditionally_add_query_item(
matching_title, 'is_deleted', is_deleted, True) matching_title = conditionally_add_query_item(matching_title, 'type', is_registration, 'osf.registration') matching_title = conditionally_add_query_item(matching_title, 'type', is_collection, 'osf.collection') if len(ignore_nodes) > 0: for node_id in ignore_nodes: matching_title = matching_title & ~Q(_id=node_id) my_projects = [] my_project_count = 0 public_projects = [] if include_contributed == 'yes':
        my_projects = AbstractNode.objects.filter(
            matching_title &
            Q(_contributors=user)  # user is a contributor
        )[:max_results]
        my_project_count = len(my_projects)  # number of matches from the user's own projects

    if my_project_count < max_results and include_public == 'yes':
        public_projects = AbstractNode.objects.filter(
            matching_title &
            Q(is_public=True)  # is public
        )[:max_results - my_project_count]

    results = list(my_projects) + list(public_projects)
    ret = process_project_search_results(results, **kwargs)
    return ret


@must_be_logged_in
def process_project_search_results(results, **kwargs):
    """
    :param results: list of projects from the modular ODM search
    :return: we return the entire search result, which is a list of
    dictionaries. This includes the list of contributors.
    """
    user = kwargs['auth'].user
    ret = []
    for project in results:
        authors = get_node_contributors_abbrev(project=project, auth=kwargs['auth'])
        authors_html = ''
        for author in authors['contributors']:
            a = OSFUser.load(author['user_id'])
            authors_html += '<a href="%s">%s</a>' % (a.url, a.fullname)
            authors_html += author['separator'] + ' '
        authors_html += ' ' + authors['others_count']
        ret.append({
            'id': project._id,
            'label': project.title,
            'value': project.title,
            'category': 'My Projects' if user in project.contributors else 'Public Projects',
            'authors': authors_html,
        })
    return ret


@collect_auth
def search_contributor(auth):
    user = auth.user if auth else None
    nid = request.args.get('excludeNode')
    exclude = AbstractNode.load(nid).contributors if nid else []
    # TODO: Determine whether bleach is appropriate for ES payload. Also, inconsistent with website.sanitize.util.strip_html
    query = bleach.clean(request.args.get('query', ''), tags=[], strip=True)
    page = int(bleach.clean(request.args.get('page', '0'), tags=[], strip=True))
    size = int(bleach.clean(request.args.get('size', '5'), tags=[], strip=True))
    return search.search_contributor(query=query, page=page, size=size,
                                     exclude=exclude, current_user=user)
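# Illustrative sketch of how conditionally_add_query_item composes filters
# (values are hypothetical; mirrors its use in search_projects_by_title above):
#
#   base = Q(title__icontains='fish', category='project')
#   q = conditionally_add_query_item(base, 'is_deleted', 'no', True)
#   # 'no'     -> base & ~Q(is_deleted=True)
#   # 'yes'    -> base &  Q(is_deleted=True)
#   # 'either' -> base unchanged; anything else raises HTTPError(400)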
from __future__ import print_function import unittest import RMF class Tests(unittest.TestCase): def test_multiparent(self): """Test that nodes with multiple parents can be used and resolve""" for suffix in RMF.suffixes: path = RMF._get_temporary_file_path("alias2." + suffix) print(path) fh = RMF.create_rmf_file(path) rh = fh.get_root_node()
nh = rh.add_child("hi", RMF.REPRESENTATION) nh.add_child(rh) ch = nh.get_children() self.assertEqual(len(ch), 1) print(ch) self.assertEqual(ch[0], rh) def test_aliases(self): """Test that aliases can be used and resolve""" for suffix in RMF.suffixes: path = RMF._
get_temporary_file_path("alias." + suffix) print(path) fh = RMF.create_rmf_file(path) print("create factory") af = RMF.AliasFactory(fh) rh = fh.get_root_node() nh = rh.add_child("hi", RMF.REPRESENTATION) af.get(nh.add_child("alias", RMF.ALIAS)).set_aliased(rh) ch = nh.get_children() self.assertEqual(len(ch), 1) print(ch) print("final check") print(af.get(ch[0]).get_aliased()) self.assertEqual(af.get(ch[0]).get_aliased(), rh) print("done") if __name__ == '__main__': unittest.main()
#!/usr/bin/env python # encoding: utf-8 # Thomas Nagy, 2006-2010 (ita) # Ralf Habacker, 2006 (rh) # Yinon Ehrlich, 2009 """ clang/llvm detection. """ import os, sys from waflib import Configure, Options, Utils from waflib.Tools import ccroot, ar from waflib.Configure import conf @conf def find_clang(conf): """ Find the program clang, and if present, try to detect its version number """ cc = conf.find_program(['clang', 'cc'], var='CC') cc = conf.cmd_to_list(cc) conf.get_cc_version(cc, gcc=True) conf.env.CC_NAME = 'clang' conf.env.CC = cc @conf def clang_common_flags(conf): """ Common flags for clang on nearly all platforms """ v = conf.env v['CC_SRC_F'] = [] v['CC_TGT_F'] = ['-c', '-o'] # linker if not v['LINK_CC']: v['LINK_CC'] = v['CC'] v['CCLNK_SRC_F'] = [] v['CCLNK_TGT_F'] = ['-o'] v['CPPPATH_ST'] = '-I%s' v['DEFINES_ST'] = '-D%s' v['LIB_ST'] = '-l%s' # template for adding libs v['LIBPATH_ST'] = '-L%s' # template for adding libpaths v['STLIB_ST'] = '-l%s' v['STLIBPATH_ST'] = '-L%s' v['RPATH_ST'] = '-Wl,-rpath,%s' v['SONAME_ST'] = '-Wl,-h,%s' v['SHLIB_MARKER'] = '-Wl,-Bdynamic' v['STLIB_MARKER'] = '-Wl,-Bstatic' # program v['cprogram_PATTERN'] = '%s' # shared librar v['CFLAGS_cshlib'] = ['-fPIC'] v['LINKFLAGS_cshlib'] = ['-shared'] v['cshlib_PATTERN'] = 'lib%s.so' # static lib v['LINKFLAGS_cstlib'] = ['-Wl,-Bstatic'] v['cstlib_PATTERN'] = 'lib%s.a' # osx stuff v['LINKFLAGS_MACBUNDLE'] = ['-bundle', '-undefined', 'dynamic_lookup'] v['CFLAGS_MACBUNDLE'] = ['-fPIC'] v['macbundle_PATTERN'] = '%s.bundle' @conf def clang_modifier_win32(conf): """Configuration flags for executing clang on Windows""" v = conf.env v['cprogram_PATTERN'] = '%s
.exe' v['cshlib_PATTERN'] = '%s.dll' v['implib_PATTERN'] = 'lib%s.dll.a' v['IMPLIB_ST'] = '-Wl,--out-implib,%s' v['CFLAGS_cshlib'] = [] v.append_value('CFLAGS_cshlib', ['-DDLL_EXPORT']) # TODO adding non
standard defines like this DLL_EXPORT is not a good idea # Auto-import is enabled by default even without this option, # but enabling it explicitly has the nice effect of suppressing the rather boring, debug-level messages # that the linker emits otherwise. v.append_value('LINKFLAGS', ['-Wl,--enable-auto-import']) @conf def clang_modifier_cygwin(conf): """Configuration flags for executing clang on Cygwin""" clang_modifier_win32(conf) v = conf.env v['cshlib_PATTERN'] = 'cyg%s.dll' v.append_value('LINKFLAGS_cshlib', ['-Wl,--enable-auto-image-base']) v['CFLAGS_cshlib'] = [] @conf def clang_modifier_darwin(conf): """Configuration flags for executing clang on MacOS""" v = conf.env v['CFLAGS_cshlib'] = ['-fPIC', '-compatibility_version', '1', '-current_version', '1'] v['LINKFLAGS_cshlib'] = ['-dynamiclib'] v['cshlib_PATTERN'] = 'lib%s.dylib' v['FRAMEWORKPATH_ST'] = '-F%s' v['FRAMEWORK_ST'] = ['-framework'] v['ARCH_ST'] = ['-arch'] v['LINKFLAGS_cstlib'] = [] v['SHLIB_MARKER'] = [] v['STLIB_MARKER'] = [] v['SONAME_ST'] = [] @conf def clang_modifier_aix(conf): """Configuration flags for executing clang on AIX""" v = conf.env v['LINKFLAGS_cprogram'] = ['-Wl,-brtl'] v['LINKFLAGS_cshlib'] = ['-shared','-Wl,-brtl,-bexpfull'] v['SHLIB_MARKER'] = [] @conf def clang_modifier_hpux(conf): v = conf.env v['SHLIB_MARKER'] = [] v['CFLAGS_cshlib'] = ['-fPIC','-DPIC'] v['cshlib_PATTERN'] = 'lib%s.sl' @conf def clang_modifier_platform(conf): """Execute platform-specific functions based on *clang_modifier_+NAME*""" # * set configurations specific for a platform. # * the destination platform is detected automatically by looking at the macros the compiler predefines, # and if it's not recognised, it fallbacks to sys.platform. clang_modifier_func = getattr(conf, 'clang_modifier_' + conf.env.DEST_OS, None) if clang_modifier_func: clang_modifier_func() def configure(conf): """ Configuration for clang """ conf.find_clang() conf.find_ar() conf.clang_common_flags() conf.clang_modifier_platform() conf.cc_load_tools() conf.cc_add_flags() conf.link_add_flags()
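# Typical wscript usage sketch for this tool (assumes a standard waf project
# layout; file and target names are illustrative):
#
#   def configure(conf):
#       conf.load('clang')      # invokes the configure() defined above
#
#   def build(bld):
#       bld.program(source='main.c', target='app')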
aramstring) elif first_check_source == "vanished" and check_source == "new": cluster_items[(check_type, item)] = ("old", first_paramstring) elif check_source == "vanished" and first_check_source == "new": cluster_items[(check_type, item)] = ("old", paramstring) # In all other cases either both must be "new" or "vanished" -> let it be # Now add manual and active serivce and handle ignored services merge_manual_services(cluster_items, hostname) return cluster_items # Get the list of service of a host or cluster and guess the current state of # all services if possible def get_check_preview(hostname, use_caches, do_snmp_scan, on_error): services = get_host_services(hostname, use_caches, do_snmp_scan, on_error) if is_cluster(hostname): ipaddress = None else: ipaddress = lookup_ipaddress(hostname) table = [] for (check_type, item), (check_source, paramstring) in services.items(): params = None if check_source not in [ 'legacy', 'active', 'custom' ]: # apply check_parameters try: if type(paramstring) == str: params = eval(paramstring) else: params = paramstring except: raise MKGeneralException("Invalid check parameter string '%s'" % paramstring) descr = service_description(check_type, item) global g_service_description g_service_description = descr infotype = check_type.split('.')[0] # Sorry. The whole caching stuff is the most horrible hack in # whole Check_MK. Nobody dares to clean it up, YET. But that # day is getting nearer... global opt_use_cachefile old_opt_use_cachefile = opt_use_cachefile opt_use_cachefile = True opt_dont_submit = True # hack for get_realhost_info, avoid skipping because of check interval if check_type not in check_info: continue # Skip not existing check silently try: exitcode = None perfdata = [] info = get_info_for_check(hostname, ipaddress, infotype) # Handle cases where agent does not output data except MKAgentError, e: exitcode = 3 output = "Error getting data from agent" if str(e): output += ": %s" % e tcp_error = output except MKSNMPError, e: exitcode = 3 output = "Error getting data from agent for %s via SNMP" % infotype if str(e): output += ": %s" % e snmp_error = output except Exception, e: exitcode = 3 output = "Error getting data for %s: %s" % (infotype, e) if check_uses_snmp(check_type): snmp_error = output else: tcp_error = output opt_use_cachefile = old_opt_use_cachefile global g_check_type, g_checked_item g_check_type = check_type g_checked_item = item if exitcode == None: check_function = check_info[check_type]["check_function"] if check_source != 'manual': params = compute_check_parameters(hostname, check_type, item, params) try: reset_wrapped_counters() result = sanitize_check_result(check_function(item, params, info), check_uses_snmp(check_type)) if last_counter_wrap(): raise last_counter_wrap() except MKCounterWrapped, e: result = (None, "WAITING - Counter based check, cannot be done offline") except Exception, e: if opt_debug: raise result = (3, "UNKNOWN - invalid output from agent or error in check implementation") if len(result) == 2: result = (result[0], result[1], []) exitcode, output, perfdata = result else: descr = item exitcode = None output = "WAITING - %s check, cannot be done offline" % check_source.title() perfdata = [] if check_source == "active": params = eval(paramstring) if check_source in [ "legacy", "active", "custom" ]: checkgroup = None if service_ignored(hostname, None, descr): check_source = "ignored" else: checkgroup = check_info[check_type]["group"] table.append((check_source, check_type, checkgroup, item, paramstring, 
params, descr, exitcode, output, perfdata)) return table #. # .--Autochecks----------------------------------------------------------. # | _ _ _ _ | # | / \ _ _| |_ ___ ___| |__ ___ ___| | _____ | # | / _ \| | | | __/ _ \ / __| '_ \ / _ \/ __| |/ / __| | # | / ___ \ |_| | || (_) | (__| | | | __/ (__| <\__ \ | # | /_/ \_\__,_|\__\___/ \___|_| |_|\___|\___|_|\_\___/ | # | | # +----------------------------------------------------------------------+ # |
Reading, parsing, writing, modifying autochecks files | # '----------------------------------------------------------------------' # Read automatically discovered checks of one host. # world: "config" -> File in var/check_mk/autochecks # "
active" -> Copy in var/check_mk/core/autochecks # Returns a table with three columns: # 1. check_type # 2. item # 3. parameters evaluated! def read_autochecks_of(hostname, world="config"): if world == "config": basedir = autochecksdir else: basedir = var_dir + "/core/autochecks" filepath = basedir + '/' + hostname + '.mk' if not os.path.exists(filepath): return [] try: autochecks_raw = eval(file(filepath).read()) except SyntaxError,e: if opt_verbose or opt_debug: sys.stderr.write("Syntax error in file %s: %s\n" % (filepath, e)) if opt_debug: raise return [] except Exception, e: if opt_verbose or opt_debug: sys.stderr.write("Error in file %s:\n%s\n" % (filepath, e)) if opt_debug: raise return [] # Exchange inventorized check parameters with those configured by # the user. Also merge with default levels for modern dictionary based checks. autochecks = [] for entry in autochecks_raw: if len(entry) == 4: # old format where hostname is at the first place entry = entry[1:] check_type, item, parameters = entry # With Check_MK 1.2.7i3 items are now defined to be unicode strings. Convert # items from existing autocheck files for compatibility. TODO remove this one day if type(item) == str: item = decode_incoming_string(item) autochecks.append((check_type, item, compute_check_parameters(hostname, check_type, item, parameters))) return autochecks # Read autochecks, but do not compute final check parameters, # also return a forth column with the raw string of the parameters. # Returns a table with three columns: # 1. check_type # 2. item # 3. parameter string, not yet evaluated! def parse_autochecks_file(hostname): def split_python_tuple(line): quote = None bracklev = 0 backslash = False for i, c in enumerate(line): if backslash: backslash = False continue elif c == '\\': backslash = True elif c == quote: quote = None # end of quoted string elif c in [ '"', "'" ]:
str("secure_sysvars") class InstSecureSysvars(Instruction): def execute(self, fr): fr.globalvar_set(0, fr.user) fr.globalvar_set(1, si.DBRef(db.getobj(fr.user).location)) fr.globalvar_set(2, fr.trigger) fr.globalvar_set(3, fr.command) @instr("!") class InstBang(Instruction): def execute(self, fr): fr.check_underflow(2) v = fr.data_pop(si.GlobalVar, si.FuncVar) val = fr.data_pop() if isinstance(v, si.GlobalVar): fr.globalvar_set(v.value, val) elif isinstance(v, si.FuncVar): fr.funcvar_set(v.value, val) def __str__(self): return "!" @instr("@") class InstAt(Instruction): def execute(self, fr): v = fr.data_pop(si.GlobalVar, si.FuncVar) if isinstance(v, si.GlobalVar): val = fr.globalvar_get(v.value) fr.data_push(val) elif isinstance(v, si.FuncVar): val = fr.funcvar_get(v.value) fr.data_push(val) def __str__(self): return "@" @instr("dup") class InstDup(Instruction): def execute(self, fr): a = fr.data_pop() fr.data_push(a) fr.data_push(a) @instr("shallow_copy") class InstShallowCopy(Instruction): def execute(self, fr): a = fr.data_pop() fr.data_push(a) fr.data_push(copy.copy(a)) @instr("deep_copy") class InstDeepCopy(Instruction): def execute(self, fr): a = fr.data_pop() fr.data_push(a) fr.data_push(copy.deepcopy(a)) @instr("?dup") class InstQDup(Instruction): def execute(self, fr): a = fr.data_pop() if isinstance(a, si.DBRef): if a.value != -1: fr.data_push(a) elif a: fr.data_push(a) fr.data_push(a) @instr("dupn") class InstDupN(Instruction): def execute(self, fr): n = fr.data_pop(int) fr.check_underflow(n) for i in range(n): fr.data_push(fr.data_pick(n)) @instr("ldup") class InstLDup(Instruction): def execute(self, fr): n = fr.data_pick(1) if not isinstance(n, int): raise MufRuntimeError("Expected integer argument.") n += 1 fr.check_underflow(n) for i in range(n): fr.data_push(fr.data_pick(n)) @instr("pop") class InstPop(Instruction): def execute(self, fr): fr.data_pop() @instr("popn") class InstPopN(Instruction): def execute(self, fr): n = fr.data_pop(int) fr.check_underflow(n) for i in range(n): fr.data_pop() @instr("swap") class InstSwap(Instruction): def execute(self, fr): fr.check_underflow(2) b = fr.data_pop() a = fr.data_pop() fr.data_push(b) fr.data_push(a) @instr("rot") class InstRot(Instruction): def execute(self, fr): fr.check_underflow(3) a = fr.data_pull(3) fr.data_push(a) @instr("-rot") class InstNegRot(Instruction): def execute(self, fr): fr.check_underflow(3) c = fr.data_pop() b = fr.data_pop() a = fr.data_pop() fr.data_push(c) fr.data_push(a) fr.data_push(b) @instr("rotate") class InstRotate(Instruction): def execute(self, fr): num = fr.data_pop(int) fr.check_underflow(num) if not num: return if num < 0: a = fr.data_pop() fr.data_insert((-num) - 1, a) elif num > 0: a = fr.data_pull(num) fr.data_push(a) @instr("pick") class InstPick(
Instruction): def execute(self, fr): num = fr.data_pop(int) fr.check_underflow(num) if not num: return if num < 0: raise MufRuntimeError("Expected positive integer.") else: a = fr.data_pick(num) fr.data_push(a) @instr("over") class InstOver(Instruction): def execute(self, fr): fr.check_underflow(2) a = fr.data_pick(2) fr.dat
a_push(a) @instr("put") class InstPut(Instruction): def execute(self, fr): fr.check_underflow(2) num = fr.data_pop(int) val = fr.data_pop() fr.check_underflow(num) if not num: return if num < 0: raise MufRuntimeError("Value out of range") else: fr.data_put(num, val) @instr("nip") class InstNip(Instruction): def execute(self, fr): fr.check_underflow(3) b = fr.data_pop() a = fr.data_pop() fr.data_push(b) @instr("tuck") class InstTuck(Instruction): def execute(self, fr): fr.check_underflow(3) b = fr.data_pop() a = fr.data_pop() fr.data_push(b) fr.data_push(a) fr.data_push(b) @instr("reverse") class InstReverse(Instruction): def execute(self, fr): num = fr.data_pop(int) fr.check_underflow(num) if not num: return arr = [fr.data_pop() for i in range(num)] for val in arr: fr.data_push(val) @instr("lreverse") class InstLReverse(Instruction): def execute(self, fr): num = fr.data_pop(int) fr.check_underflow(num) if not num: return arr = [fr.data_pop() for i in range(num)] for val in arr: fr.data_push(val) fr.data_push(num) @instr("{") class InstMark(Instruction): def execute(self, fr): fr.data_push(si.Mark()) @instr("}") class InstMarkCount(Instruction): def execute(self, fr): for i in range(fr.data_depth()): a = fr.data_pick(i + 1) if isinstance(a, si.Mark): fr.data_pull(i + 1) fr.data_push(i) return raise MufRuntimeError("StackUnderflow") @instr("depth") class InstDepth(Instruction): def execute(self, fr): fr.data_push(fr.data_depth()) @instr("fulldepth") class InstFullDepth(Instruction): def execute(self, fr): fr.data_push(fr.data_full_depth()) @instr("variable") class InstVariable(Instruction): def execute(self, fr): vnum = fr.data_pop(int) fr.data_push(si.GlobalVar(vnum)) @instr("localvar") class InstLocalVar(Instruction): def execute(self, fr): vnum = fr.data_pop(int) fr.data_push(si.GlobalVar(vnum)) @instr("caller") class InstCaller(Instruction): def execute(self, fr): fr.data_push(fr.caller_get()) @instr("prog") class InstProg(Instruction): def execute(self, fr): fr.data_push(fr.program) @instr("trig") class InstTrig(Instruction): def execute(self, fr): fr.data_push(fr.trigger) @instr("cmd") class InstCmd(Instruction): def execute(self, fr): fr.data_push(fr.command) @instr("checkargs") class InstCheckArgs(Instruction): itemtypes = { 'a': ([si.Address], "address"), 'd': ([si.DBRef], "dbref"), 'D': ([si.DBRef], "valid object dbref"), 'e': ([si.DBRef], "exit dbref"), 'E': ([si.DBRef], "valid exit dbref"), 'f': ([si.DBRef], "program dbref"), 'F': ([si.DBRef], "valid program dbref"), 'i': ([int], "integer"), 'l': ([si.Lock], "lock"), 'p': ([si.DBRef], "player dbref"), 'P': ([si.DBRef], "valid player dbref"), 'r': ([si.DBRef], "room dbref"), 'R': ([si.DBRef], "valid room dbref"), 's': ([str], "string"), 'S': ([str], "non-null string"), 't': ([si.DBRef], "thing dbref"), 'T': ([si.DBRef], "valid thing dbref"), 'v': ([si.GlobalVar, si.FuncVar], "variable"), '?': ([], "any"), } objtypes = { 'D': "", 'P': "player", 'R': "room", 'T': "thing", 'E': "exit", 'F': "program", } def checkargs_part(self, fr, fmt, depth=1): count = "" pos = len(fmt) - 1 while pos >= 0: ch = fmt[pos] pos -= 1 if ch == " ": continue elif util.is_int(ch): count = ch + count continue elif ch == "}":
g3.status_at_node[mg3.nodes_at_left_edge] = FIXED_VALUE_BOUNDARY >>> mg3.status_at_node[mg3.nodes_at_top_edge] = CLOSED_BOUNDARY >>> mg3.status_at_node[mg3.nodes_at_bottom_edge] = CLOSED_BOUNDARY >>> mg3.status_at_node[mg3.nodes_at_right_edge] = CLOSED_BOUNDARY >>> fr3 = FlowRouter(mg3) >>> K_field = mg3.ones('node') # K can be a field >>> sp3 = FastscapeEroder(mg3, K_sp=K_field, m_sp=1., n_sp=0.6, ... threshold_sp=mg3.node_x, ... rainfall_intensity=2.) >>> fr3.run_one_step() >>> sp3.run_one_step(1.) >>> z.reshape((3, 7))[1, :] # doctest: +NORMALIZE_WHITESPACE array([ 0. , 0.0647484 , 0.58634455, 2.67253503, 8.49212152, 20.92606987, 36. ]) >>> previous_z = z.copy() >>> sp3.run_one_step(1., rainfall_intensity_if_used=0.) >>> np.allclose(z, previous_z) True ''' _name = 'FastscapeEroder' _input_var_names = ( 'topographic__elevation', 'drainage_area', 'flow__link_to_receiver_node', 'flow__upstream_node_order', 'flow__receiver_node', ) _output_var_names = ( 'topographic__elevation', ) _var_units = { 'topographic__elevation': 'm', 'drainage_area': 'm**2', 'flow__link_to_receiver_node': '-', 'flow__upstream_node_order': '-', 'flow__receiver_node': '-', } _var_mapping = { 'topographic__elevation': 'node', 'drainage_area': 'node', 'flow__link_to_receiver_node': 'node', 'flow__upstream_node_order': 'node', 'flow__receiver_node': 'node', } _var_doc = { 'topographic__elevation': 'Land surface topographic elevation', 'drainage_area': "Upstream accumulated surface area contributing to the node's " "discharge", 'flow__link_to_receiver_node': 'ID of link downstream of each node, which carries the discharge', 'flow__upstream_node_order': 'Node array containing downstream-to-upstream ordered list of ' 'node IDs', 'flow__receiver_node': 'Node array of receivers (node that receives flow from current ' 'node)', } @use_file_name_or_kwds def __init__(self, grid, K_sp=None, m_sp=0.5, n_sp=1., threshold_sp=0., rainfall_intensity=1., **kwds): """ Initialize the Fastscape stream power component. Note: a timestep, dt, can no longer be supplied to this component through the input file. It must instead be passed directly to the run method. Parameters ---------- grid : ModelGrid A grid. K_sp : float, array, or field name K in the stream power equation (units vary with other parameters). m_sp : float, optional m in the stream power equation (power on drainage area). n_sp : float, optional n in the stream power equation (power on slope). rainfall intensity : float, array, or field name; optional Modifying factor on drainage area to convert it to a true water volume flux in (m/time). i.e., E = K * (r_i*A)**m * S**n """ self._grid = grid self.K = K_sp # overwritten below in special cases self.m = float(m_sp) self.n = float(n_sp) if type(threshold_sp) in (float, int): self.thresholds = float(threshold_sp) else: if type(threshold_sp) is str: self.thresholds = self.grid.at_node[threshold_sp] else: self.thresholds = threshold_sp assert self.thresholds.size == self.grid.number_of_nodes # make storage variables self.A_to_the_m = grid.zeros(at='node') self.alpha = grid.empty(at='node') self.alpha_by_flow_link_lengthtothenless1 = numpy.empty_like( self.alpha) try: self.grid._diagonal_links_at_node # calc number of diagonal links except AttributeError: pass # was not a raster if self.K is None: raise ValueError('K_sp must be set as a float, node array, or ' + 'field name. 
It was None.') # now handle the inputs that could be float, array or field name: # some support here for old-style inputs if type(K_sp) is str: if K_sp == 'array': self.K = None else: self.K = self._grid.at_node[K_sp] elif type(K_sp) in (float, int): # a float self.K = float(K_sp) elif (type(K_sp) is numpy.ndarray and len(K_sp) == self.grid.number_of_nodes): self.K = K_sp else: raise TypeError('Supplied type of K_sp ' + 'was not recognised, or array was ' + 'not nnodes long!') if type(rainfall_intensity) is str: raise ValueError('This component can no longer handle ' + 'spatially variable rainfall. Use ' + 'StreamPowerEroder.') if rainfall_intensity == 'array': self._r_i = None else: self._r_i = self._grid.at_node[rainfall_intensity] elif type(rainfall_intensity) in (float, int): #
a float self._r_i = float(rainfall_intensity) elif len(rainfall_intensity) == self.grid.number_of_nodes: raise ValueError('This component can no longer handle ' + 'spatially variable rainfall. Use ' + 'StreamPo
werEroder.') self._r_i = numpy.array(rainfall_intensity) else: raise TypeError('Supplied type of rainfall_' + 'intensity was not recognised!') # We now forbid changing of the field name if 'value_field' in kwds.keys(): raise ValueError('This component can no longer support variable' + 'field names. Use "topographic__elevation".') def erode(self, grid_in, dt=None, K_if_used=None, flooded_nodes=None, rainfall_intensity_if_used=None): """ This method implements the stream power erosion, following the Braun- Willett (2013) implicit Fastscape algorithm. This should allow it to be stable against larger timesteps than an explicit stream power scheme. This driving method for this component is now superceded by the new, standardized wrapper :func:`run_one_step`, but is retained for back compatibility. Set 'K_if_used' as a field name or nnodes-long array if you set K_sp as 'array' during initialization. It returns the grid, in which it will have modified the value of *value_field*, as specified in component initialization. Parameters ---------- grid_in : a grid This is a dummy argument maintained for component back- compatibility. It is superceded by the copy of the grid passed during initialization. dt : float Time-step size. If you are calling the deprecated function :func:`gear_timestep`, that method will supercede any value supplied here. K_if_used : array (optional) Set this to an array if you set K_sp to 'array' in your input file. flooded_nodes : ndarray of int (optional) IDs of nodes that are flooded and should have no erosion. If not provided but flow has still been routed across depressions, erosion may still occur beneath the apparent water level (though will always still be positive). rainfall_intensity_if_used : float or None (optional) Supply to drive this component with a time-varying spatially constant rainfall. Returns ------- grid A reference to the grid.
from django.contrib.auth.models import User from django.core.urlresolvers import reverse from django.test import TestCase from models import Project class ProjectsTest(TestCase): fixtures = ['test_data.json'] def test_project_listing(self): """ Verify that the project listing page contains all projects within the page's context. """ response = self.client.get(reverse("projects:list")) self.failUnlessEqual(response.status_code, 200) try: response.context['project_list'] except KeyError: self.fail("Template context did not contain project_list object.") for project
in Project.objects.published(): self
.assertTrue(project in response.context['project_list']) def test_verify_author_detail_pages(self): """ Verify that each author has a detail page and that the author is contained within the page's context. """ for project in Project.objects.all(): response = self.client.get(project.get_absolute_url()) if project.published(): self.assertTrue(response.status_code == 200) try: self.failUnlessEqual(response.context['project'], project) except KeyError: self.fail("Template context did not contain project object.") else: self.assertTrue(response.status_code == 404)
import paho.mqtt.client as mqtt import json, time import RPi.GPIO as GPIO from time import sleep # The script as below using BCM GPIO 00..nn numbers GPIO.setmode(GPIO.BCM) # Set relay pins as output GPIO.setup(24, GPIO.OUT) # ----- CHANGE THESE FOR YOUR SETUP ----- MQTT_HOST = "190.97.168.236" MQTT_PORT = 1883 USERNAME = '' PASSW
ORD = "" # -----
---------------------------------- def on_connect(client, userdata, rc): print("\nConnected with result code " + str(rc) + "\n") #Subscribing in on_connect() means that if we lose the connection and # reconnect then subscriptions will be renewed. client.subscribe("/iot/control/") print("Subscribed to iotcontrol") def on_message_iotrl(client, userdata, msg): print("\n\t* Raspberry UPDATED ("+msg.topic+"): " + str(msg.payload)) if msg.payload == "gpio24on": GPIO.output(24, GPIO.HIGH) client.publish("/iot/status", "Relay gpio18on", 2) if msg.payload == "gpio24off": GPIO.output(24, GPIO.LOW) client.publish("/iot/status", "Relay gpio18off", 2) def command_error(): print("Error: Unknown command") client = mqtt.Client(client_id="rasp-g1") # Callback declarations (functions run based on certain messages) client.on_connect = on_connect client.message_callback_add("/iot/control/", on_message_iotrl) # This is where the MQTT service connects and starts listening for messages client.username_pw_set(USERNAME, PASSWORD) client.connect(MQTT_HOST, MQTT_PORT, 60) client.loop_start() # Background thread to call loop() automatically # Main program loop while True: time.sleep(10)
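# Companion publisher sketch (hypothetical; run from another process or host
# to drive the relay handled by on_message_iotrl above):
#
#   pub = mqtt.Client(client_id="rasp-remote")
#   pub.username_pw_set(USERNAME, PASSWORD)
#   pub.connect(MQTT_HOST, MQTT_PORT, 60)
#   pub.publish("/iot/control/", "gpio24on", qos=2)
#   pub.disconnect()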
# # Copyright (c) 2021 Arm Limited and Contributors. All rights reserved. # SPDX-License-Identifier: Apache-2.0 # """Power cycle devices using the 'Mbed TAS RM REST API'.""" import os import json import time import requests from .host_test_plugins import HostTestPluginBase class HostTestPluginPowerCycleResetMethod(HostTestPluginBase): """Plugin interface adaptor for Mbed TAS RM REST API.""" name = "HostTestPluginPowerCycleResetMethod" type = "ResetMethod" stable = True capabilities = ["power_cycle"] required_parameters = ["target_id", "device_info"] def __init__(self): """Initialise plugin.""" HostTestPluginBase.__init__(self) def setup(self, *args, **kwargs): """Configure plugin. This function should be called before plugin execute() method is used. """ return True def execute(self, capability, *args, **kwargs): """Power cycle a device using the TAS RM API. If the "capability" name is not "power_cycle" this method will just fail. Args: capability: Capability name. args: Additional arguments. kwargs: Additional arguments. Returns: True if the power cycle succeeded, otherwise False. """ if "target_id" not in kwargs or not kwargs["target_id"]: self.print_plugin_error("Error: This plugin requires unique target_id") return False if "device_info" not in kwargs or type(kwargs["device_info"]) is not dict: self.print_plugin_error( "Error: This plugin requires dict parameter 'device_info' passed by " "the caller." ) return False result = False if self.check_parameters(capability, *args, **kwargs) is True: if capability in HostTestPluginPowerCycleResetMethod.capabilities: target_id = kwargs["target_id"] device_info = kwargs["device_info"] ret = self.__get_mbed_tas_rm_addr() if ret: ip, port = ret result = self.__hw_reset(ip, port, target_id, device_info) return result def __get_mbed_tas_rm_addr(self): """Get IP and Port of mbed tas rm service.""" try: ip = os.environ["MBED_TAS_RM_IP"] port = os.environ["MBED_TAS_RM_PORT"] return ip, port except KeyError as e: self.print_plugin_error( "HOST: Failed to read environment variable (" + str(e) + "). Can't perform hardware reset." ) return None def __hw_reset(self, ip, port, target_id, device_info): """Reset target device using TAS RM API.""" switch_off_req = { "name": "switchResource", "sub_requests": [ { "resource_type": "mbed_platform", "resource_id": target_id, "switch_command": "OFF", } ], } switch_on_req = { "name": "switchResource", "sub_requests": [ { "resource_type": "mbed_platform", "resource_id": tar
get_id, "switch_command": "ON", } ], } result = False # reset target switch_off_req = self.__run_request(ip, port, switch_off_req) if switch_off_req is None: self.print_plugin_error("HOST: Failed to communicate with TAS RM!") return result if "error" in switch_off_req["sub_requests"][0]: self.print_plugin_error( "HOST: Failed to reset target. error = %s"
% switch_off_req["sub_requests"][0]["error"] ) return result def poll_state(required_state): switch_state_req = { "name": "switchResource", "sub_requests": [ { "resource_type": "mbed_platform", "resource_id": target_id, "switch_command": "STATE", } ], } resp = self.__run_request(ip, port, switch_state_req) start = time.time() while ( resp and ( resp["sub_requests"][0]["state"] != required_state or ( required_state == "ON" and resp["sub_requests"][0]["mount_point"] == "Not Connected" ) ) and (time.time() - start) < 300 ): time.sleep(2) resp = self.__run_request(ip, port, resp) return resp poll_state("OFF") self.__run_request(ip, port, switch_on_req) resp = poll_state("ON") if ( resp and resp["sub_requests"][0]["state"] == "ON" and resp["sub_requests"][0]["mount_point"] != "Not Connected" ): for k, v in resp["sub_requests"][0].viewitems(): device_info[k] = v result = True else: self.print_plugin_error("HOST: Failed to reset device %s" % target_id) return result @staticmethod def __run_request(ip, port, request): headers = {"Content-type": "application/json", "Accept": "text/plain"} get_resp = requests.get( "http://%s:%s/" % (ip, port), data=json.dumps(request), headers=headers ) resp = get_resp.json() if get_resp.status_code == 200: return resp else: return None def load_plugin(): """Return plugin available in this module.""" return HostTestPluginPowerCycleResetMethod()
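# Usage sketch (assumes MBED_TAS_RM_IP / MBED_TAS_RM_PORT are exported in the
# environment; the target id below is only a placeholder):
#
#   plugin = load_plugin()
#   plugin.setup()
#   ok = plugin.execute("power_cycle",
#                       target_id="<unique_target_id>",
#                       device_info={})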
#!/usr/bin/env python3 # Copyright (c) 2016 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. # from test_framework.mininode import * from test_framework.test_framework import BitcoinTestFramework from test_framework.util import * import time ''' FeeFilterTest -- test processing of feefilter messages ''' def hashToHex(hash): return format(hash, '064x') # Wait up to 60 secs to see if the testnode has received all the expected invs def allInvsMatch(invsExpected, testnode): for x in range(60): with mininode_lock: if (sorted(invsExpected) == sorted(testnode.txinvs)): return True; time.sleep(1) return False; # TestNode: bare-bones "peer". Used to track which invs are received from a node # and to send the node feefilter messages. class TestNode(SingleNodeConnCB): def __init__(self): SingleNodeConnCB.__init__(self) self.txinvs = [] def on_inv(self, conn, message): for i in message.inv: if (i.type == 1): self.txinvs.append(hashToHex(i.hash)) def clear_invs(self): with mininode_lock: self.txinvs = [] def send_filter(self, feerate):
self.send_message(msg_feefilter(feerate)) self.sync_with_ping() class FeeFilterTest(BitcoinTestFramew
ork): def __init__(self): super().__init__() self.num_nodes = 2 self.setup_clean_chain = False def setup_network(self): # Node1 will be used to generate txs which should be relayed from Node0 # to our test node self.nodes = [] self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-logtimemicros"])) self.nodes.append(start_node(1, self.options.tmpdir, ["-debug", "-logtimemicros"])) connect_nodes(self.nodes[0], 1) def run_test(self): node1 = self.nodes[1] node0 = self.nodes[0] # Get out of IBD node1.generate(1) sync_blocks(self.nodes) node0.generate(21) sync_blocks(self.nodes) # Setup the p2p connections and start up the network thread. test_node = TestNode() connection = NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], test_node) test_node.add_connection(connection) NetworkThread().start() test_node.wait_for_verack() # Test that invs are received for all txs at feerate of 20 sat/byte node1.settxfee(Decimal("0.00020000")) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() # Set a filter of 15 sat/byte test_node.send_filter(15000) # Test that txs are still being received (paying 20 sat/byte) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() # Change tx fee rate to 10 sat/byte and test they are no longer received node1.settxfee(Decimal("0.00010000")) [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] sync_mempools(self.nodes) # must be sure node 0 has received all txs # Send one transaction from node0 that should be received, so that we # we can sync the test on receipt (if node1's txs were relayed, they'd # be received by the time this node0 tx is received). This is # unfortunately reliant on the current relay behavior where we batch up # to 35 entries in an inv, which means that when this next transaction # is eligible for relay, the prior transactions from node1 are eligible # as well. node0.settxfee(Decimal("0.00020000")) txids = [node0.sendtoaddress(node0.getnewaddress(), 1)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() # Remove fee filter and check that txs are received again test_node.send_filter(0) txids = [node1.sendtoaddress(node1.getnewaddress(), 1) for x in range(3)] assert(allInvsMatch(txids, test_node)) test_node.clear_invs() if __name__ == '__main__': FeeFilterTest().main()
import subprocess """ ideas from https://gist.github.com/godber/7692812 """ class PdfInfo: def __init__(self, filepath): self.filepath = filepath self.info = {} self.cmd = "pdfinfo" self.process() def process(self): labels = ['Title', 'Author', 'Creator', 'Pro
ducer', 'CreationDate', \ 'ModDate', 'Tagged', 'Pages', 'Encrypted', 'Page size', \ 'File size', 'Optimized', 'PDF version'] cmdOutput = subprocess.check_output([self.cmd,
self.filepath]) for line in cmdOutput.splitlines(): for label in labels: if label in line: self.info[label] = self.extract(line) def isEncrypted(self): return False if (self.info['Encrypted'][:2]=="no") else True def extract(self, row): return row.split(':', 1)[1].strip() def getPages(self): return int(self.info['Pages']) def getFileSizeInBytes(self): return int(self.info['File size'][:-5].strip())
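# Example usage (a minimal sketch; 'sample.pdf' is a placeholder and the
# poppler-utils `pdfinfo` binary must be on PATH):
#
#   pdf = PdfInfo('sample.pdf')
#   print(pdf.getPages(), pdf.isEncrypted(), pdf.getFileSizeInBytes())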
# # Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Replace strings in an input file to produce an output file. Use as: replace_strings.py --input file.in [--output path/to/file.out] \ --replacement_mapping file_containing_a_replacement_mapping where file_containing_a_replacement_mapping is a file that looks like: {'FROM SOME STRING': 'TO SOME STRING', 'remove_this_entirely': '', 'foo': 'bar'} This file is essentially a python dict format, and is insensitive to whitespace. Use this form if the strings you're replacing contain spaces, or are otherwise cumbersome to represent in the command line form, which looks like: replace_strings.py --input file.in [--output path/to/file.out] \ --from FROM_STRING --to TO_STRING --from REMOVE_ENTIRELY --to= Note that the intermediate directories to --output will be created if needed. If --output is not specified, results are written to standard output. From gyp: 'actions': [ { 'action_name': 'replace_strings', 'inputs': [ '<(google3_dir)/path/to/file.in', ], 'outputs': [ '<(SHARED_INTERMEDIATE_DIR)/put/file/here/file.out', ], 'action': [ '<(python)', '<(ion_dir)/dev/replace_strings.py', '--replacement_mapping', 'file_containing_a_replacement_mapping', '--output', '<@(_outputs)', '--input', '<@(_inputs)', ], }, ], """ import optparse import os import re import sys def main(argv): """Entry point. Args: argv: use sys.argv[1:]. See OptionParser usage below. """ parser = optparse.OptionParser() parser.add_option('--input') parser.add_option('--output') parser.add_option('--replacement_mapping', default=None) parser.add_option('--from', action='append', default=[]) parser.add_option('--to', action='append', default=[]) options, _ = parser.parse_args(argv) replacement_mapping = {} if options.replacement_mapping is not None: with open(options.replacement_mapping, 'r') as m: replacement_mapping = eval(m.read()) if options.output and not os.path.isdir(os.path.dirname(options.output)): os.makedirs(os.path.dirname(opti
ons.output)) # We avoid options.input here because 'input' shadows the Python built-in of the same name. with open(getattr(options, 'input'), 'r') as input_: text = input_.read() for from_pattern, to_text in replacement_mapping.items(): # Treat f
rom_pattern as a regex, with re.DOTALL (meaning dot captures # newlines). To prevent . from being greedy, use a "?". E.g.: # # 'remove: {.*?}' will correctly handle: # # 'remove: { things we want removed } { things we want to keep }' # # because the . stops at the first '}'. See: # https://docs.python.org/2/library/re.html#regular-expression-syntax text = re.sub(re.compile(from_pattern, re.DOTALL), to_text, text) for from_text, to_text in zip(getattr(options, 'from'), options.to): text = text.replace(from_text, to_text) if options.output: with open(options.output, 'w') as output: output.write(text) else: sys.stdout.write(text) if __name__ == '__main__': main(sys.argv[1:])
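# A small self-contained demo of the re.DOTALL + non-greedy behaviour described in the
# comments above; the sample string is made up for illustration.
import re

sample = 'remove: { things we want removed } { things we want to keep }'
# The '?' keeps '.*' from running on to the last '}', so only the first block is dropped.
print(re.sub(re.compile('remove: {.*?}', re.DOTALL), '', sample))
# prints: ' { things we want to keep }'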
/bin/python # -*- coding:utf8 -*- import os import tensorflow as tf from keras import layers from keras.applications.imagenet_utils import _obtain_input_shape from keras.backend.tensorflow_backend import set_session from keras.engine.topology import get_source_inputs from keras.layers import * from keras.models import Model from keras.utils import plot_model from scipy.misc import imsave from scripts.image_process import * K._LEARNING_PHASE = tf.constant(1) # test mode # configure gpu usage os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID" os.environ["CUDA_VISIBLE_DEVICES"] = "0" config = tf.ConfigProto() config.gpu_options.per_process_gpu_memory_fraction = 0.5 set_session(tf.Session(config=config)) # pass gpu setting to keras def identity_block(input_tensor, kernel_size, filters, block): """The identity block is the block that has no conv layer at shortcut. # Arguments input_tensor: input tensor kernel_size: defualt 3, the kernel size of middle conv layer at main path filters: list of integers, the filterss of 2 conv layer at main path stage: integer, current stage label, used for generating layer names block: 'a','b'..., current block label, used for generating layer names # Returns Output tensor for the block. """ filters1, filters2 = filters if K.image_data_format() == 'channels_last': bn_axis = 3 else: bn_axis = 1 conv_name_base = 'res' + block + '_branch' bn_name_base = 'bn' + block + '_branch' if kernel_size is None: kernel_size = (3, 3) x = Conv2D(filters1, kernel_size, padding='same', name=conv_name_base + '_a')(input_tensor) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '_a')(x) x = Activation('relu')(x) x = Conv2D(filters2, kernel_size, padding='same', name=conv_name_base + '_b')(x) x = BatchNormalization(axis=bn_axis, name=bn_name_base + '_b')(x) x = layers.add([x, input_tensor]) # x = Activation('relu')(x) return x def gen_model_v2(input_content_tensor=None, input_content_shape=None, input_style_tensor=None, input_style_shape=None, weights_path=None): # Determine proper content input shape input_content_shape = _obtain_input_shape(input_content_shape, default_size=256, min_size=48, data_format=K.image_data_format(), include_top=False) if input_content_tensor is None: content_input = Input(shape=input_content_shape) else: if not K.is_keras_tensor(input_content_tensor): content_input = Input(tensor=input_content_tensor, shape=input_content_shape) else:
content_input = input_content_tensor # Determine proper style input shape input_style_shape = _obtain_input_shape(input_style_shape, default_size=256, min_size=48, data_format=K.image_data_format(),
include_top=False) if input_style_tensor is None: style_input = Input(shape=input_style_shape) else: if not K.is_keras_tensor(input_style_tensor): style_input = Input(tensor=input_style_tensor, shape=input_style_shape) else: style_input = input_style_tensor if K.image_data_format() == 'channels_last': bn_axis = 3 else: bn_axis = 1 # TODO: replace BN with instance norm # content branch x = Conv2D(32, (9, 9), activation='linear', padding='same', name='ct_conv1', strides=(1, 1))(content_input) x = BatchNormalization(axis=bn_axis, name="ct_batchnorm1")(x) x = Activation('relu')(x) x = Conv2D(64, (3, 3), activation='linear', padding='same', name='ct_conv2', strides=(2, 2))(x) x = BatchNormalization(axis=bn_axis, name="ct_batchnorm2")(x) x = Activation('relu')(x) x = Conv2D(128, (3, 3), activation='linear', padding='same', name='ct_conv3', strides=(2, 2))(x) x = BatchNormalization(axis=bn_axis, name="ct_batchnorm3")(x) x = Activation('relu')(x) x_remain = identity_block(input_tensor=x, kernel_size=(3, 3), filters=[128, 128], block='_ct_1') # style branch y = Conv2D(32, (9, 9), activation='linear', padding='same', name='sl_conv1', strides=(1, 1))(style_input) y = BatchNormalization(axis=bn_axis, name="sl_batchnorm1")(y) y = Activation('relu')(y) y = Conv2D(64, (3, 3), activation='linear', padding='same', name='sl_conv2', strides=(2, 2))(y) y = BatchNormalization(axis=bn_axis, name="sl_batchnorm2")(y) y = Activation('relu')(y) y = Conv2D(128, (3, 3), activation='linear', padding='same', name='sl_conv3', strides=(2, 2))(y) y = BatchNormalization(axis=bn_axis, name="sl_batchnorm3")(y) y = Activation('relu')(y) y = identity_block(input_tensor=y, kernel_size=(3, 3), filters=[128, 128], block='_sl_1') y = identity_block(input_tensor=y, kernel_size=(3, 3), filters=[128, 128], block='_sl_2') # special content part x = Conv2D(128, kernel_size=(3, 3), padding='same', name='res' + '_a_1')(x_remain) x = BatchNormalization(axis=bn_axis, name='res' + 'bn_a_1')(x) x = Activation('relu')(x) xy = layers.multiply([x, y]) xy = Conv2D(128, kernel_size=(3, 3), padding='same', name='res' + '_b_2')(xy) xy = BatchNormalization(axis=bn_axis, name='res' + 'bn_b_2')(xy) xy = layers.add([xy, x_remain]) # merged branch z = identity_block(input_tensor=xy, kernel_size=(3, 3), filters=[128, 128], block='_merge') z = identity_block(input_tensor=z, kernel_size=(3, 3), filters=[128, 128], block='_merge_1') z = identity_block(input_tensor=z, kernel_size=(3, 3), filters=[128, 128], block='_merge_2') z = Conv2DTranspose(64, kernel_size=(2, 2), activation='linear', padding='same', strides=(2, 2), name='conv1_T')(z) z = BatchNormalization(axis=bn_axis, name="batchnorm4")(z) z = Activation('relu')(z) z = Conv2DTranspose(32, kernel_size=(2, 2), activation='linear', padding='same', strides=(2, 2), name='conv2_T')(z) z = BatchNormalization(axis=bn_axis, name="batchnorm5")(z) z = Activation('relu')(z) z = Conv2DTranspose(3, kernel_size=(9, 9), activation='linear', padding='same', strides=(1, 1), name='conv4')(z) z = BatchNormalization(axis=bn_axis, name="batchnorm6")(z) outputs = Activation('relu')(z) # Ensure that the model takes into account # any potential predecessors of `input_tensor`. if input_content_tensor is not None: content_inputs = get_source_inputs(input_content_tensor)[0] else: content_inputs = content_input if input_style_tensor is not None: style_inputs = get_source_inputs(input_style_tensor)[0] else: style_inputs = style_input inputs = [content_inputs, style_inputs] # Create model. 
model = Model(inputs=inputs, outputs=outputs, name='Gen_model_v2') # load weights if weights_path is not None: model.load_weights(weights_path, by_name=True) return model, outputs if __name__ == '__main__': # load images content_img_path = '../images/content/train-3.jpg' content_img = preprocess_image(content_img_path, height=256, width=256) print'Input content_image shape:', content_img.shape[1:4] style_img_path = '../images/style/starry_night.jpg' style_img = preprocess_image(style_img_path, height=256, width=256) print'Input style_image shape:', style_img.shape[1:4] # load model model, _ = gen_model_v2(input_content_shape=style_img.shape[1:4], input_style_shape=content_img.shape[1:4]) plot_model(model, to_file='../images/autoencoder/Gen_model_v2.png', show_shapes=True, show_layer_names=True) output = model.predict([content_img, style_img]) output = deprocess_image(output) print'Output image shape:', output.shape[1:4] # pylab.imshow(output[0])
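# A hedged sketch (reusing the imports and identity_block defined above) showing the
# residual block in isolation; it assumes the default channels_last data format, so the
# last entry of `filters` must match the input's channel count for the final add.
demo_in = Input(shape=(64, 64, 128))
demo_out = identity_block(demo_in, kernel_size=(3, 3), filters=[128, 128], block='_demo')
demo_model = Model(inputs=demo_in, outputs=demo_out)
demo_model.summary()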
, '>=': 'gte', '<': 'lt', '<=': 'lte' } class SearchException(Exception): """ Exception class for unparseable search queries """ def __init__(self, message): self.message = message def contact_search(org, query, base_queryset): """ Searches for contacts :param org: the org (used for date formats and timezones) :param query: the query, e.g. 'name = "Bob"' :param base_queryset: the base query set which queries operate on :return: a tuple of the contact query set, a boolean whether query was complex """ from .models import URN_SCHEME_CHOICES global PROPERTY_ALIASES if not PROPERTY_ALIASES: PROPERTY_ALIASES = {scheme: 'urns__path' for scheme, label in URN_SCHEME_CHOICES} try: return contact_search_complex(org, query, base_queryset), True except SearchException: pass # if that didn't work, try again as simple name or urn path query return contact_search
_simple(org, query, base_queryset)
, False def contact_search_simple(org, query, base_queryset): """ Performs a simple term based search, e.g. 'Bob' or '250783835665' """ matches = ('name__icontains',) if org.is_anon else ('name__icontains', 'urns__path__icontains') terms = query.split() q = Q(pk__gt=0) for term in terms: term_query = Q(pk__lt=0) for match in matches: term_query |= Q(**{match: term}) if org.is_anon: # try id match for anon orgs try: term_as_int = int(term) term_query |= Q(id=term_as_int) except ValueError: pass q &= term_query return base_queryset.filter(q).distinct() def contact_search_complex(org, query, base_queryset): """ Performs a complex query based search, e.g. 'name = "Bob" AND age > 18' """ global search_lexer, search_parser # attach context to the lexer search_lexer.org = org search_lexer.base_queryset = base_queryset # combining results from multiple joins can lead to duplicates return search_parser.parse(query, lexer=search_lexer).distinct() def generate_queryset(lexer, identifier, comparator, value): """ Generates a queryset from the base and given field condition :param lexer: the lexer :param identifier: the contact attribute or field name, e.g. name :param comparator: the comparator, e.g. = :param value: the literal value, e.g. "Bob" :return: the query set """ # resolve identifier aliases, e.g. '>' -> 'gt' if identifier in PROPERTY_ALIASES.keys(): identifier = PROPERTY_ALIASES[identifier] if identifier in NON_FIELD_PROPERTIES: if identifier == 'urns__path' and lexer.org.is_anon: raise SearchException("Cannot search by URN in anonymous org") q = generate_non_field_comparison(identifier, comparator, value) else: from temba.contacts.models import ContactField try: field = ContactField.objects.get(org=lexer.org, key=identifier) except ObjectDoesNotExist: raise SearchException("Unrecognized contact field identifier %s" % identifier) if field.value_type == Value.TYPE_TEXT: q = generate_text_field_comparison(field, comparator, value) elif field.value_type == Value.TYPE_DECIMAL: q = generate_decimal_field_comparison(field, comparator, value) elif field.value_type == Value.TYPE_DATETIME: q = generate_datetime_field_comparison(field, comparator, value, lexer.org) elif field.value_type == Value.TYPE_STATE or field.value_type == Value.TYPE_DISTRICT: q = generate_location_field_comparison(field, comparator, value) else: raise SearchException("Unrecognized contact field type '%s'" % field.value_type) return lexer.base_queryset.filter(q) def generate_non_field_comparison(relation, comparator, value): lookup = TEXT_LOOKUP_ALIASES.get(comparator, None) if not lookup: raise SearchException("Unsupported comparator %s for non-field" % comparator) return Q(**{'%s__%s' % (relation, lookup): value}) def generate_text_field_comparison(field, comparator, value): lookup = TEXT_LOOKUP_ALIASES.get(comparator, None) if not lookup: raise SearchException("Unsupported comparator %s for text field" % comparator) return Q(**{'values__contact_field__key': field.key, 'values__string_value__%s' % lookup: value}) def generate_decimal_field_comparison(field, comparator, value): lookup = DECIMAL_LOOKUP_ALIASES.get(comparator, None) if not lookup: raise SearchException("Unsupported comparator %s for decimal field" % comparator) try: value = Decimal(value) except Exception: raise SearchException("Can't convert '%s' to a decimal" % unicode(value)) return Q(**{'values__contact_field__key': field.key, 'values__decimal_value__%s' % lookup: value}) def generate_datetime_field_comparison(field, comparator, value, org): lookup = 
DATETIME_LOOKUP_ALIASES.get(comparator, None) if not lookup: raise SearchException("Unsupported comparator %s for datetime field" % comparator) # parse as localized date and then convert to UTC tz = pytz.timezone(org.timezone) local_date = str_to_datetime(value, tz, org.get_dayfirst(), fill_time=False) # passed date wasn't parseable so don't match any contact if not local_date: return Q(pk=-1) value = local_date.astimezone(pytz.utc) if lookup == '<equal>': # check if datetime is between date and date + 1d, i.e. anytime in that 24 hour period return Q(**{ 'values__contact_field__key': field.key, 'values__datetime_value__gte': value, 'values__datetime_value__lt': value + timedelta(days=1)}) elif lookup == 'lte': # check if datetime is less then date + 1d, i.e. that day and all previous return Q(**{ 'values__contact_field__key': field.key, 'values__datetime_value__lt': value + timedelta(days=1)}) elif lookup == 'gt': # check if datetime is greater than or equal to date + 1d, i.e. day after and subsequent return Q(**{ 'values__contact_field__key': field.key, 'values__datetime_value__gte': value + timedelta(days=1)}) else: return Q(**{'values__contact_field__key': field.key, 'values__datetime_value__%s' % lookup: value}) def generate_location_field_comparison(field, comparator, value): lookup = LOCATION_LOOKUP_ALIASES.get(comparator, None) if not lookup: raise SearchException("Unsupported comparator %s for location field" % comparator) return Q(**{ 'values__contact_field__key': field.key, 'values__location_value__name__%s' % lookup: value}) #################################### Lexer definition #################################### tokens = ('BINOP', 'COMPARATOR', 'TEXT', 'STRING') literals = '()' # treat reserved words specially # http://www.dabeaz.com/ply/ply.html#ply_nn4 reserved = { 'or': 'BINOP', 'and': 'BINOP', 'has': 'COMPARATOR', 'is': 'COMPARATOR', } t_ignore = ' \t' # ignore tabs and spaces def t_COMPARATOR(t): r"""(?i)~|=|[<>]=?|~~?""" return t def t_STRING(t): r"""("[^"]*")""" t.value = t.value[1:-1] return t def t_TEXT(t): r"""[\w_\.\+\-\/]+""" t.type = reserved.get(t.value.lower(), 'TEXT') return t def t_error(t): raise SearchException("Invalid character %s" % t.value[0]) #################################### Parser definition #################################### precedence = ( (str('left'), str('BINOP')), ) def p_expression_binop(p): """expression : expression BINOP expression""" if p[2].lower() == 'and': p[0] = p[1] & p[3] elif p[2].lower() == 'or': p[0] = p[1] | p[3] def p_expression_grouping(p): """expression : '(' expression ')'""" p[0] = p[2] def p_expression_comparison(p): """expression : TEXT COMPARATOR literal""" p[0] = generate_queryset(p.lexer, p[1].lower(), p[2].
# -*- coding: cp1252 -*- import urllib,urllib2,re,cookielib,string,os,sys import xbmc, xbmcgui, xbmcaddon, xbmcplugin from resources.libs import main #Mash Up - by Mash2k3 2012. from t0mm0.common.addon import Addon from resources.universal import playbackengine, watchhistory addon_id = 'plugin.video.movie25' selfAddon = xbmcaddon.Addon(id=addon_id) addon = Addon('plugin.video.movie25', sys.argv) art = main.art wh = watchhistory.WatchHistory('plugin.video.movie25') def MAINFB(): main.GA("Sports","FitnessBlender") main.addDir('Body Focus','bf',199,art+'/fitnessblender.png') main.addDir('Difficulty','bf',200,art+'/fitnessblender.png') main.addDir('Training Type','bf',201,art+'/fitnessblender.png
') def DIFFFB(): main.addDir('Level 1','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=1&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Level 2','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=2&type
[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Level 3','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=3&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Level 4','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=4&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Level 5','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=5&type[]=&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') def BODYFB(): main.addDir('Upper Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=36',202,art+'/fitnessblender.png') main.addDir('Core','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=34',202,art+'/fitnessblender.png') main.addDir('Lower Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=35',202,art+'/fitnessblender.png') main.addDir('Total Body','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=&equipment[]=&body_focus[]=37',202,art+'/fitnessblender.png') def TRAINFB(): main.addDir('Balance/Agility','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3e&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Barre','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3a&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Cardiovascular','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3f&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('HIIT','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=38&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Kettlebell','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=39&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Low Impact','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3c&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Pilates','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3d&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Plyometric','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3h&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Strength Training','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3i&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') 
main.addDir('Toning','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3j&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Warm Up/Cool Down','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3v&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') main.addDir('Yoga/Stretching/Flexibility','http://www.fitnessblender.com/v/full-length-workouts/?all=1p=1&str=&time_min=&time_max=&cal_min=&cal_max=&difficulty[]=&type[]=3b&equipment[]=&body_focus[]=',202,art+'/fitnessblender.png') def LISTBF(murl): main.GA("FitnessBlender","List") link=main.OPENURL(murl) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace('–','-') main.addLink("[COLOR red]Body Focus [/COLOR]"+"[COLOR yellow]Calorie Burn [/COLOR]"+"[COLOR blue]Difficulty [/COLOR]"+"[COLOR green]Duration[/COLOR]",'','') match=re.compile('<a class="teaser group" href="(.+?)"><div class=".+?<img id=".+?" class="fit_img.+?data-original="(.+?)" alt="([^"]+)".+?"><p>Calorie Burn:(.+?)</p><p>Minutes:(.+?)</p><p>Difficulty:(.+?)</p><p>Body Focus:(.+?)</p></div>').findall(link) for url,thumb,name,cal,dur,diff,bf in match: main.addPlayMs(name+" [COLOR red]"+bf+"[/COLOR]"+"[COLOR yellow]"+cal+"[/COLOR]"+"[COLOR blue]"+diff+"[/COLOR]"+"[COLOR green]"+dur+"[/COLOR]",'http://www.fitnessblender.com/'+url,203,thumb,'','','','','') def LINKBB(mname,murl,thumb): ok=True main.GA("FitnessBlender","Watched") link=main.OPENURL(murl) link=link.replace('\r','').replace('\n','').replace('\t','').replace('&nbsp;','').replace('–','-') match=re.compile('src="http://www.youtube.com/embed/(.+?).?rel').findall(link) for url in match: url = "plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid="+url stream_url = url # play with bookmark player = playbackengine.PlayWithoutQueueSupport(resolved_url=stream_url, addon_id=addon_id, video_type='', title=mname,season='', episode='', year='',img=thumb,infolabels='', watchedCallbackwithParams=main.WatchedCallbackwithParams,imdb_id='') #WatchHistory if selfAddon.getSetting("whistory") == "true": wh.add_item(mname+' '+'[COLOR green]Fitness Blender[/COLOR]', sys.argv[0]+sys.argv[2], infolabels='', img=thumb, fanart='', is_folder=False) player.KeepAlive() return ok
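# A standalone sketch of the embed-URL extraction used in LINKBB above; the sample HTML
# and video id are invented purely for illustration.
import re

sample = '<iframe src="http://www.youtube.com/embed/dQw4w9WgXcQ?rel=0" width="640"></iframe>'
for vid in re.compile('src="http://www.youtube.com/embed/(.+?).?rel').findall(sample):
    print("plugin://plugin.video.youtube/?path=/root/video&action=play_video&videoid=" + vid)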
): return self.model.objects.filter(element_type = 'number') admin.site.register(NumberElementModel, NumberElementAdmin) class RangeElementAdmin(GeoformElementAdmin): form = RangeElementForm fieldsets = ( (None, { 'fields': ('question', 'min_label', 'max_label',) }), (_('Advanced options'), { 'classes': ('collapse',), 'fields': ('min_value', 'max_value', 'step', 'initial_value',) }), ) def queryset(self, request): return self.model.objects.filter(element_type = 'range') admin.site.register(RangeElementModel, RangeElementAdmin) class ParagraphElementAdmin(GeoformElementAdmin): form = ParagraphForm def queryset(self, request): return self.model.objects.filter(element_type = 'paragraph') admin.site.register(ParagraphElementModel, ParagraphElementAdmin) class DrawbuttonElementAdmin(GeoformElementAdmin): form = DrawbuttonForm def queryset(self, request): return self.model.objects.filter(element_type = 'drawbutton') admin.site.register(DrawbuttonElementModel, DrawbuttonElementAdmin) class CheckboxElementAdmin(GeoformElementAdmin): form = CheckboxElementForm add_form_template = 'admin/geoforms/geoformelement/create_element.html' change_form_template = add_form_template def queryset(self, request): return self.model.objects.filter(element_type = 'checkbox') def add_view(self, request, form_url='', extra_context=None): if request.method == 'POST': ces = formset_factory(CheckboxElementForm, formset=CheckboxElementFormSet) cs = ces(request.POST) cs.save() return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist')) else: return super(CheckboxElementAdmin, self).add_view(request, form_url = '', extra_context = { 'current_app': self.admin_site.name, 'form': QuestionForm(), 'formset': formset_factory(CheckboxElementForm)}) def change_view(self, request, object_id, form_url='', extra_context=None): if request.method == 'POST': ces = formset_factory(CheckboxElementForm, formset=CheckboxElementFormSet) cs = ces(request.POST) cs.save() return HttpResponseRedirect(reverse('admin:geoforms_checkboxelementmodel_changelist')) else: initial_data = [] question_data = {'question': []} checkboxelement = CheckboxElementModel.objects.get(id = object_id) for i, lang in enumerate(settings.LANGUAGES): html = getattr(checkboxelement,'html_%s' % lang[0]) if html == None: html = getattr(checkboxelement,'html_%s' % settings.LANGUAGES[0][0]) soup = BeautifulSoup(html) question_data['question'].append(soup.p.text.strip()) if soup.find(attrs={'data-random': 'true'}): question_data['randomize'] = True labels = soup.find_all('label') for j, label in enumerate(labels): if i == 0: initial_data.append({u'label': [label.text.strip()]}) else: initial_data[j]['label'].append(label.text.strip()) return super(CheckboxElementAdmin, self).change_view(request, object_id, form_url = '', extra_context = { 'current_app': self.admin_site.name, 'form': QuestionForm(initial = question_data), 'formset': formset_factory(CheckboxElementForm, extra = 0)(initial = initial_data)}) admin.site.register(CheckboxElementModel, CheckboxElementAdmin) class RadioElementAdmin(GeoformElementAdmin): form = RadioElementForm add_form_template = 'admin/geoforms/geoformelement/create_element.html' change_form_template = add_form_template def queryset(self, request): return self.model.objects.filter(element_type = 'radio') def add_view(self, request, form_url='', extra_context=None): if request.method == 'POST': res = formset_factory(RadioElementForm, formset=RadioElementFormSet) rs = res(request.POST) rs.save() return 
HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist')) else: return super(RadioElementAdmin, self).add_view(request, form_url = '', extra_context = { 'current_app': self.admin_site.name, 'form': QuestionForm(), 'formset': formset_factory(RadioElementForm)}) def change_view(self, request, object_id, form_url='', extra_context=None): if request.method == 'POST': res = formset_factory(RadioElementForm, formset=RadioElementFormSet) rs = res(request.POST) rs.save() return HttpResponseRedirect(reverse('admin:geoforms_radioelementmodel_changelist')) else: initial_data = [] question_data = {'question': []} radioelement = RadioElementModel.objects.get(id = object_id) for i, lang in enumerate(settings.LANGUAGES): html = getattr(radioelement,'html_%s' % lang[0]) if html == None: html = getattr(radioelement,'html_%s' % settings.LANGUAGES[0][0])
soup = BeautifulSoup(html) question_data['question'].append(soup.p.text) if soup.find(attrs={'data-random': 'true'}): question_data['randomize'] = True labels = soup.f
ind_all('label') for j, label in enumerate(labels): if i == 0: initial_data.append({u'label': [label.text.strip()]}) else: initial_data[j]['label'].append(label.text.strip()) return super(RadioElementAdmin, self).change_view(request, object_id, form_url = '', extra_context = { 'current_app': self.admin_site.name, 'form': QuestionForm(initial = question_data), 'formset': formset_factory(RadioElementForm, extra = 0)(initial = initial_data)}) admin.site.register(RadioElementModel, RadioElementAdmin) class SelectElementAdmin(GeoformElementAdmin): form = SelectElementForm add_form_template = 'admin/geoforms/geoformelement/create_element.html' change_form_template = add_form_template def queryset(self,
# Lint as: python3 # Copyright 2021 Google LLC. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this
file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
express or implied. # See the License for the specific language governing permissions and # limitations under the License. """An abstract class for the Vizier client for both CAIP and uCAIP."""
# -*- coding: utf-8 -*- # Part of Odoo. See LICENSE file for full copyright and licensing details. { 'name': 'Account Analytic Defaults', 'version': '1.0', 'category': 'Accounting', 'description': """ Set default values for your analytic accounts. ============================================== Automatically selects analytic accounts based on the following criteria: ----------------------------------------------------------------------
* Product * Partner * User * Company * Date """, 'website': 'https://www.odoo.com/page/accounting', 'depends': ['sale_stock'], 'data': [ 'security/ir.model.access.csv', 'security/account_analytic_default_security.xml', 'account_analytic_default_view.xml' ], 'demo':
[], 'installable': True, 'auto_install': False, }
import collections import itertools import nengo import pacman103 from .config import Config import connection import ensemble import node import probe import utils class Assembler(object): """The Assembler object takes a built collection of objects and connections and converts them into PACMAN vertices and edges, and returns the portion of the network to be simulated on host. """ object_builders = dict() # Map of classes to functions connection_builders = dict() # Map of (pre_obj, post_obj) tuples to functions @classmethod def register_object_builder(cls, func, nengo_class): cls.object_builders[nengo_class] = func @classmethod def register_connection_builder(cls, func, pre_obj=None, post_obj=None): cls.connection_builders[(pre_obj, post_obj)] = func def build_object(self, obj): for obj_type in obj.__class__.__mro__: if obj_type in self.object_builders: break else: raise TypeError("Cannot assemble object of type '%s'." % obj.__class__.__name__) vertex = self.object_builders[obj_type](obj, self) if vertex is not None: assert isinstance(vertex, pacman103.lib.graph.Vertex) vertex.runtime = self.time_in_seconds return vertex def build_connection(self, connection): pre_c = list(connection.pre_obj.__class__.__mro__) + [None] post_c = list(connection.post_obj.__class__.__mro__) + [None] for key in itertools.chain(*[[(a, b) for b in post_c] for a in pre_c]): if key in self.connection_builders: return self.connection_builders[key](connection, self) else: raise TypeError("Cannot build a connection from a '%s' to '%s'." % (connection.pre_obj.__class__.__name__, connection.post_obj.__class__.__name__)) def __call__(self, objs, conns, time_in_seconds, dt, config=None): """Construct PACMAN vertices and edges, and a reduced version of the model for simulation on host. :param objs: A list of objects to convert into PACMAN vertices. :param conns: A list of connections which will become edges. :param time_in_seconds: The run time of the simulation (None means infinite). :param dt: The time step of the simulation. :param config: Configuration options for the simulation. """ # Store the config self.config = config if self.config is None: self.config = Config() self.timestep = 1000 self.dt = dt self.time_in_seconds = time_in_seconds self.n_ticks = (int(time_in_seconds / dt) if time_in_seconds is not None else 0) # Store for querying self.connections = conns # Construct each object in turn to produce vertices self.object_vertices = dict([(o, self.build_object(o)) for o in objs]) self.vertices = [v for v in self.object_vertices.values() if v is not None] # Construct each connection in turn to produce edges self.edges = filter(lambda x: x is not None, [self.build_connection(c) for c in conns]) return self.vertices, self.edges def get_object_vertex(self, obj): """Return the vertex which represents the given object.""" return self.object_vertices[obj] def get_incoming_connections(self, obj): return [c for c in self.connections if c.post_obj == obj]
def get_outgoing_connections(self, obj): return [c for c in self.connections if c.pre_obj == obj] Assembler.register_connection_builder(connection.generic_connection_builder) Assembler.register_object_builder(ensemble.EnsembleLIF.assemble, ensemble.IntermediateEnsembleLIF) Assembler.register_object_builder(node.FilterVertex.ass
emble_from_intermediate, node.IntermediateFilter) Assembler.register_object_builder(node.FilterVertex.assemble, node.FilterVertex) Assembler.register_object_builder(probe.DecodedValueProbe.assemble, probe.IntermediateProbe) def vertex_builder(vertex, assembler): return vertex Assembler.register_object_builder(vertex_builder, pacman103.lib.graph.Vertex) def assemble_node(node, assembler): pass Assembler.register_object_builder(assemble_node, nengo.Node) MulticastPacket = collections.namedtuple('MulticastPacket', ['timestamp', 'key', 'payload']) class MulticastPlayer(utils.vertices.NengoVertex): # NOTE This is intended to be temporary while PACMAN is refactored MODEL_NAME = 'nengo_mc_player' MAX_ATOMS = 1 def __init__(self): super(MulticastPlayer, self).__init__(1) self.regions = [None, None, None, None] @classmethod def assemble(cls, mcp, assembler): # Get all the symbols to transmit prior to and after the simulation sinks = set( c.post_obj for c in assembler.get_outgoing_connections(mcp)) start_items = list() end_items = list() for sink in sinks: for p in sink.start_packets: start_items.extend([0, p.key, 0 if p.payload is None else p.payload, p.payload is not None]) for p in sink.end_packets: end_items.extend([0, p.key, 0 if p.payload is None else p.payload, p.payload is not None]) # Build the regions start_items.insert(0, len(start_items)/4) start_region = utils.vertices.UnpartitionedListRegion( start_items) end_items.insert(0, len(end_items)/4) end_region = utils.vertices.UnpartitionedListRegion( end_items) mcp.regions[1] = start_region mcp.regions[3] = end_region return mcp Assembler.register_object_builder(MulticastPlayer.assemble, MulticastPlayer)
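# A hedged sketch of how a new object type could hook into the Assembler registry above;
# 'PassthroughNode' and its builder are invented names used only for illustration.
class PassthroughNode(object):
    """Hypothetical object type that needs no vertex of its own."""


def assemble_passthrough(obj, assembler):
    # Returning None tells Assembler.build_object not to create a PACMAN vertex.
    return None

Assembler.register_object_builder(assemble_passthrough, PassthroughNode)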
# # Copyright (C) 2010, 2011, 2014 Stanislav Bohm # # This file is part of Kaira. # # Kaira is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, version 3 of the License, or # (at your option) any later version. # # Kaira is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Kaira. If not, see <http://www.gnu.org/licenses/>. # import gtk import socket from subprocess import Popen, PIPE, STDOUT from threading import Thread, Lock class ReadLineThread(Thread): def __init__(self, stream): Thread.__init__(self) self.stream = stream self.lock = Lock() self.exit_flag = False self.daemon = True def start(self): Thread.start(self) def run(self): while True: line = self.stream.readline() if line == "": self.on_exit() return with self.lock: if self.exit_flag: return if not self.on_line(line, self.stream): return def readline(self): return self.stream.readline() def safe_call(self, callback, *params): if callback is None: return gtk.gdk.threads_enter() try: return callback(*params) finally: gtk.gdk.threads_leave() def set_exit_flag(self): with self.lock: self.exit_flag = True class ProcessThread(ReadLineThread): def __init__(self, process, line_callback, exit_callback): ReadLineThread.__init__(self, process.stdout) self.process = process self.line_callback =
line_callback self.exit_callback = exit_callback def on_exit(self): self.process.w
ait() return self.safe_call(self.exit_callback, self.process.returncode) def on_line(self, line, stream): return self.safe_call(self.line_callback, line, stream) class ConnectionThread(ReadLineThread): def __init__(self, host, port, line_callback, exit_callback, connect_callback): ReadLineThread.__init__(self, None) self.host = host self.port = port self.line_callback = line_callback self.exit_callback = exit_callback self.connect_callback = connect_callback self.sock = None def run(self): try: self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) self.sock.connect((self.host, self.port)) self.stream = self.sock.makefile("r") self.safe_call(self.connect_callback, self.stream) ReadLineThread.run(self) except socket.error, e: self.on_exit(str(e)) def on_exit(self, message = None): if self.stream: self.stream.close() return self.safe_call(self.exit_callback, message) def on_line(self, line, stream): return self.safe_call(self.line_callback, line, stream) class Process: def __init__(self, filename, line_callback = None, exit_callback = None): self.filename = filename self.line_callback = line_callback self.exit_callback = exit_callback self.cwd = None def start(self, params = []): self._start_process(params) self.pipe_in = self.process.stdin self._start_thread() def start_and_get_first_line(self, params = []): self._start_process(params) self.pipe_in = self.process.stdin line = self.process.stdout.readline() self._start_thread() return line def _start_process(self, params): self.process = Popen((self.filename,) + tuple(params), bufsize=0, stdin=PIPE, stdout=PIPE, stderr=STDOUT, cwd=self.cwd) def _start_thread(self): self.thread = ProcessThread(self.process, self.line_callback, self.exit_callback) self.thread.start() def write(self, string): self.pipe_in.write(string) def shutdown(self, silent = True): self.thread.set_exit_flag() if silent: try: self.process.terminate() except OSError: pass else: self.process.terminate() class Connection: def __init__(self, hostname, port, line_callback = None, exit_callback = None, connect_callback = None): self.hostname = hostname self.port = port self.line_callback = line_callback self.exit_callback = exit_callback self.connect_callback = connect_callback def start(self): self.thread = ConnectionThread(self.hostname, self.port, self.line_callback, self.exit_callback, self.connect_callback) self.thread.start() def write(self, text): self.thread.sock.send(text) class CommandWrapper: def __init__(self, backend): self.backend = backend self.callbacks = [] self.lock = Lock() def start(self, *params): self.backend.line_callback = self._line_callback self.backend.start(*params) def run_command(self, command, callback, lines=None): if callback: with self.lock: self.callbacks.append((callback, lines)) if command is not None: self.backend.write(command + "\n") def run_command_expect_ok(self, command, ok_callback=None, fail_callback=None, finalize_callback=None): def callback(line): if finalize_callback: finalize_callback() if line != "Ok\n": if fail_callback: fail_callback() else: print "Command '{0}' returns '{1}'".format(command, line) else: if ok_callback: ok_callback() self.run_command(command, callback) def shutdown(self): self.backend.shutdown() def readline(self): """ Read line from backned. !! You can use this only if you are in "callback" !! 
""" return self.backend.readline() def _line_callback(self, line, stream): if line.startswith("ERROR:"): print line return False with self.lock: assert self.callbacks, line cb, lines = self.callbacks[0] del self.callbacks[0] if lines is None: cb(line) else: buffer = [ line ] + [ stream.readline() for i in xrange(lines - 1) ] cb(buffer) return True
from FindPathsPlugin import FindPathsPlugin import tulipplugins class FindPaths0(FindPathsPlugin): """ Tulip plugin algorithm whic
h searches for 1-
hop paths """ def __init__(self, context): FindPathsPlugin.__init__(self, context, 0) # The line below does the magic to register the plugin to the plugin database # and updates the GUI to make it accessible through the menus. tulipplugins.registerPlugin("FindPaths0", "Find Nodes (Regex)", "Nathaniel Nelson", "9/3/2016", "", "1.0")
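# A hedged sketch of how another variant would follow the same registration pattern;
# the class name, hop parameter, and metadata strings below are illustrative only.
class FindPaths1(FindPathsPlugin):
    """ Hypothetical variant registered with a different hop parameter """
    def __init__(self, context):
        FindPathsPlugin.__init__(self, context, 1)

tulipplugins.registerPlugin("FindPaths1", "Find Nodes (Regex) variant", "Nathaniel Nelson", "9/3/2016", "", "1.0")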
n # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Unit tests for Superset""" import json import prison import pytest from flask import escape from superset import app from superset.models import core as models from tests.integration_tests.dashboards.base_case import DashboardTestCase from tests.integration_tests.dashboards.consts import * from tests.integration_tests.dashboards.dashboard_test_utils import * from tests.integration_tests.dashboards.superset_factory_util import * from tests.integration_tests.fixtures.energy_dashboard import ( load_energy_table_with_slice, ) class TestDashboardDatasetSecurity(DashboardTestCase): @pytest.fixture def load_dashboard(self): with app.app_context(): table = ( db.session.query(SqlaTable).filter_by(table_name="energy_usage").one() ) # get a slice from the allowed table slice = db.session.query(Slice).filter_by(slice_name="Energy Sankey").one() self.grant_public_access_to_table(table) pytest.hidden_dash_slug = f"hidden_dash_{random_slug()}" pytest.published_dash_slug = f"published_dash_{random_slug()}" # Create a published and hidden dashboard and add them to the database published_dash = Dashboard() published_dash.dashboard_title = "Published Dashboard" published_dash.slug = pytest.published_dash_slug published_dash.slices = [slice] published_dash.published = True hidden_dash = Dashboard() hidden_dash.dashboard_title = "Hidden Dashboard" hidden_dash.slug = pytest.hidden_dash_slug hidden_dash.slices = [slice] hidden_dash.published = False db.session.merge(published_dash) db.session.merge(hidden_dash) yield db.session.commit() self.revoke_public_access_to_table(table) db.session.delete(published_dash) db.session.delete(hidden_dash) db.session.commit() def test_dashboard_access__admin_can_access_all(self): # arrange self.login(username=ADMIN_USERNAME) dashboard_title_by_url = { dash.url: dash.dashboard_title for dash in get_all_dashboards() } # act responses_by_url = { url: self.client.get(url) for url in dashboard_title_by_url.keys() } # assert for dashboard_url, get_dashboard_response in responses_by_url.items(): self.assert200(get_dashboard_response) def test_get_dashboards__users_are_dashboards_owners(self): # arrange username = "gamma" user = security_manager.find_user(username) my_owned_dashboard = create_dashboard_to_db( dashboard_title="My Dashboard", published=False, owners=[user], ) not_my_owned_dashboard = create_dashboard_to_db( dashboard_title="Not My Dashboard", published=False, ) self.login(user.username) # act get_dashboards_response = self.get_resp(DASHBOARDS_API_URL) # assert self.assertIn(my_owned_dashboard.url, get_dashboards_response) self.assertNotIn(not_my_owned_dashboard.url, get_dashboards_response) def test_get_dashboards__owners_can_view_empty_dashboard(self): # arrange dash = create_dashboard_to_db("Empty Dashboard", slug="empty_dashboard") dashboard_url = dash.url gamma_user = security_manager.find_user("gamma") self.login(gamma_user.username) # act get_dashboards_response = 
self.get_resp(DASHBOARDS_API_URL) # assert self.assertNotIn(dashboard_url, get_dashboards_response) def test_get_dashboards__users_can_view_favorites_dashboards(self): # arrange user = security_manager.find_user("gamma") fav_dash_slug = f"my_favorite_dash_{random_slug()}" regular_dash_slug = f"regular_dash_{random_slug()}" favorite_dash = Dashboard() favorite_dash.dashboard_title = "My Favorite Dashboard" favorite_dash.slug = fav_dash_slug regular_dash = Dashboard() regular_dash.dashboard_title = "A Plain Ol Dashboard" regular_dash.slug = regular_dash_slug db.session.merge(favorite_dash) db.session.merge(regular_dash) db.session.commit() dash = db.session.query(Dashboard).filter_by(slug=fav_dash_slug).first() favorites = models.FavStar() favorites.obj_id = dash.id favorites.class_name = "Dashboard" favorites.user_id = user.id db.session.merge(favorites) db.session.commit() self.login(user.username) # act get_dashboards_response = self.get_resp(DASHBOARDS_API_URL) # assert self.assertIn(f"/superset/dashboard/{fav_dash_slug}/", get_dashboards_response) def test_get_dashboards__user_can_not_view_unpublished_dash(self): # arrange admin_user = security_manager.find_user(ADMIN_USERNAME) gamma_user = security_manager.find_user(GAMMA_USERNAME) admin_and_draft_dashboard = create_dashboard_to_db( dashboard_title="admin_owned_unpublished_dash", owners=[admin_user] ) self.login(gamma_user.username) # act - list dashboards as a gamma user get_dashboards_response_as_gamma = self.get_resp(DASHBOARDS_API_URL) # assert self.assertNotIn( admin_and_draft_dashboard.url, get_dashboards_response_as_gamma ) @pytest.mark.usefixtures("load_energy_table_with_slice", "load_dashboard") def test_get_dashboards__users_can_view_permitted_dashboard(self): # arrange username = random_str() new_role = f"role_{random_str()}" self.create_user_with_roles(username, [new_role], should_create_roles=True) accessed_table = get_sql_table_by_name("energy_usage") self.grant_role_access_to_table(accessed_table, new_role) # get a slice from the allowed table slice_to_add_to_dashboards = get_slice_by_name("Energy Sankey") # Create a published and hidden dashboard and add them to the database first_dash = create_dashboard_to_db( dashboard_title="Published Dashboard", published=True, slices=[slice_to_add_to_dashboards], ) second_dash = create_dashboard_to_db( dashboard_title="Hidden Dashboard", published=True, slices=[slice_to_add_to_dashboards], ) try: self.login(username) # act get_dashboards_response = self.get_resp(DASHBOARDS_API_URL) # assert self.assertIn(second_dash.url, get_dashboards_response) self.assertIn(first_dash.url, get_dashboards_response) finally: self.revoke_public_access_to_table(accessed_table) def test_get_dashboards_api_no_data_access(self): """ Dashboard API: Test get dashboards no data access """ admin = self.get_user("admin") title = f"title{random_str()}" create_dashboard_to_db(title, "slug1",
owners=[admin]) self.login(username="gamma") arguments = { "filters": [{"col": "dashboard_title", "opr": "sw", "value": title[0:8]}]
} uri = DASHBOARDS_API_URL_WITH_QUERY_FORMAT.format(prison.dumps(arguments)) rv = self.client.get(uri) self.assert200(rv) data = json.loads(rv.data.d
# -*- coding: utf-8 -*- # Copyright (c) 2012-2016 CoNWeT Lab., Universidad Politécnica de Madrid # This file is part of Wirecloud. # Wirecloud is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # Wirecloud is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero General Public License for more details. # You should have received a copy of the GNU Affero General Public License # along with Wirecloud. If not, see <http://www.gnu.org/licenses/>. import json from django.core.cache import cache from django.http import HttpResponse from django.shortcuts import get_object_or_404 from django.utils.translation import ugettext as _ import six from wirecloud.catalogue.models import CatalogueResource from wirecloud.commons.baseviews import Resource from wirecloud.commons.utils.cache import CacheableData from wirecloud.commons.utils.http import authentication_required, build_error_response, get_absolute_reverse_url, get_current_domain, consumes, parse_json_request from wirecloud.platform.models import Workspace from wirecloud.platform.wiring.utils import generate_xhtml_operator_code, get_operator_cache_key class WiringEntry(Resource): @authentication_required @consumes(('application/json',)) def update(self, request, workspace_id): workspace = get_object_or_404(Workspace, id=workspace_id) if not request.user.is_superuser and workspace.creator != request.user: return build_error_response(request, 403, _('You are not allowed to update this workspace')) new_wiring_status = parse_json_request(request) old_wiring_status = workspace.wiringStatus # Check read only connections old_read_only_connections = [connection for connection in old_wiring_status['connections'] if connection.get('readonly', False)] new_read_only_connections = [connection for connection in new_wiring_status['connections'] if connection.get('readonly', False)] if len(old_read_only_connections) > len(new_read_only_connections): return build_error_response(request, 403, _('You are not allowed to remove or update read only connections')) for connection in old_read_only_connections: if connection not in new_read_only_connections: return build_error_response(request, 403, _('You are not allowed to remove or upda
te read only connections')) # Check operator preferences for operator_id, operator in six.iteritems(new_wiring_status['operators']): if operator_id in old_wiring_status['operators']: old_operator = old_wiring_status['operators'][operator_id]
added_preferences = set(operator['preferences'].keys()) - set(old_operator['preferences'].keys()) removed_preferences = set(old_operator['preferences'].keys()) - set(operator['preferences'].keys()) updated_preferences = set(operator['preferences'].keys()).intersection(old_operator['preferences'].keys()) else: # New operator added_preferences = operator['preferences'].keys() removed_preferences = () updated_preferences = () for preference_name in added_preferences: if operator['preferences'][preference_name].get('readonly', False) or operator['preferences'][preference_name].get('hidden', False): return build_error_response(request, 403, _('Read only and hidden preferences cannot be created using this API')) for preference_name in removed_preferences: if old_operator['preferences'][preference_name].get('readonly', False) or old_operator['preferences'][preference_name].get('hidden', False): return build_error_response(request, 403, _('Read only and hidden preferences cannot be removed')) for preference_name in updated_preferences: old_preference = old_operator['preferences'][preference_name] new_preference = operator['preferences'][preference_name] if old_preference.get('readonly', False) != new_preference.get('readonly', False) or old_preference.get('hidden', False) != new_preference.get('hidden', False): return build_error_response(request, 403, _('Read only and hidden status cannot be changed using this API')) if new_preference.get('readonly', False) and new_preference.get('value') != old_preference.get('value'): return build_error_response(request, 403, _('Read only preferences cannot be updated')) workspace.wiringStatus = new_wiring_status workspace.save() return HttpResponse(status=204) def process_requirements(requirements): return dict((requirement['name'], {}) for requirement in requirements) class OperatorEntry(Resource): def read(self, request, vendor, name, version): operator = get_object_or_404(CatalogueResource, type=2, vendor=vendor, short_name=name, version=version) # For now, all operators are freely accessible/distributable #if not operator.is_available_for(request.user): # return HttpResponseForbidden() mode = request.GET.get('mode', 'classic') key = get_operator_cache_key(operator, get_current_domain(request), mode) cached_response = cache.get(key) if cached_response is None: options = json.loads(operator.json_description) js_files = options['js_files'] base_url = get_absolute_reverse_url('wirecloud.showcase_media', kwargs={ 'vendor': operator.vendor, 'name': operator.short_name, 'version': operator.version, 'file_path': operator.template_uri }, request=request) xhtml = generate_xhtml_operator_code(js_files, base_url, request, process_requirements(options['requirements']), mode) cache_timeout = 31536000 # 1 year cached_response = CacheableData(xhtml, timeout=cache_timeout, content_type='application/xhtml+xml; charset=UTF-8') cache.set(key, cached_response, cache_timeout) return cached_response.get_response()
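# A small standalone sketch (not part of the original view code) of the read-only connection
# rule enforced in WiringEntry.update above: every read-only connection in the old wiring
# status must still be present, unchanged, in the new one.
def readonly_connections_preserved(old_status, new_status):
    old_ro = [c for c in old_status['connections'] if c.get('readonly', False)]
    new_ro = [c for c in new_status['connections'] if c.get('readonly', False)]
    if len(old_ro) > len(new_ro):
        return False
    return all(c in new_ro for c in old_ro)

old = {'connections': [{'id': 1, 'readonly': True}, {'id': 2}]}
new = {'connections': [{'id': 2}]}
assert readonly_connections_preserved(old, new) is False  # the read-only connection was dropped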
# -*- coding: utf-8 -*- from operator import attrgetter from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType from pyangbind.lib.yangtypes import RestrictedClassType from pyangbind.lib.yangtypes import TypedListType from pyangbind.lib.yangtypes import YANGBool from pyangbind.lib.yangtypes import YANGListType from pyangbind.lib.yangtypes import YANGDynClass from pyangbind.lib.yangtypes import ReferenceType from pyangbind.lib.base import PybindBase from collections import OrderedDict from decimal import Decimal from bitarray import bitarray import six # PY3 support of some PY2 keywords (needs improved) if six.PY3: import builtins as __builtin__ long = int elif six.PY2: import __builtin__ from . import state class interface_ref(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module openconfig-vlan - based on the path /vlans/vlan/members/member/interface-ref. Each member element of the container is represented as a class variable - with a specific YANG type. YANG Description: Reference to an interface or subinterface """ __slots__ = ("_path_helper", "_extmethods", "__state") _yang_name = "interface-ref" _pybind_generated_by = "container" def __init__(self, *args, **kwargs): self._path_helper = False self._extmethods = False self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/vlan", defining_module="openconfig-vlan", yang_type="container", is_config=False, ) load = kwargs.pop(
"load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in se
lf._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path() + [self._yang_name] else: return ["vlans", "vlan", "members", "member", "interface-ref"] def _get_state(self): """ Getter method for state, mapped from YANG variable /vlans/vlan/members/member/interface_ref/state (container) YANG Description: Operational state for interface-ref """ return self.__state def _set_state(self, v, load=False): """ Setter method for state, mapped from YANG variable /vlans/vlan/members/member/interface_ref/state (container) If this variable is read-only (config: false) in the source YANG file, then _set_state is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_state() directly. YANG Description: Operational state for interface-ref """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass( v, base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/vlan", defining_module="openconfig-vlan", yang_type="container", is_config=False, ) except (TypeError, ValueError): raise ValueError( { "error-string": """state must be of a type compatible with container""", "defined-type": "container", "generated-type": """YANGDynClass(base=state.state, is_container='container', yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace='http://openconfig.net/yang/vlan', defining_module='openconfig-vlan', yang_type='container', is_config=False)""", } ) self.__state = t if hasattr(self, "_set"): self._set() def _unset_state(self): self.__state = YANGDynClass( base=state.state, is_container="container", yang_name="state", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions=None, namespace="http://openconfig.net/yang/vlan", defining_module="openconfig-vlan", yang_type="container", is_config=False, ) state = __builtin__.property(_get_state) _pyangbind_elements = OrderedDict([("state", state)])
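# A hedged usage sketch exercising the auto-generated accessors above; it is not part of
# the generated bindings themselves.
if __name__ == "__main__":
    ref = interface_ref()
    print(ref._path())           # ['vlans', 'vlan', 'members', 'member', 'interface-ref']
    print(ref.state._changed())  # the operational 'state' container starts out unchanged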
-Since': last_modified_since, 'Date': self.get_date_header()} status, header, body = \ self._test_object_PUT_copy(swob.HTTPOk, header) self.assertEqual(status.split()[0], '200') # Once the copy-source check passes (in the s3acl case), # Swift3 then checks the buc
ket write permissions of the destination. self.assertEqual(len(self.swift.calls_with_headers), 3) _, _, headers = self.swift.calls_with_headers[-1] self.assertTrue(headers.get('If-None-Match') is None) self.assertTrue(headers.get('If-Unmodified-Since') is None) _, _,
headers = self.swift.calls_with_headers[0] self.assertEqual(headers['If-None-Match'], etag) self.assertEqual(headers['If-Unmodified-Since'], last_modified_since) @s3acl def test_object_POST_error(self): code = self._test_method_error('POST', '/bucket/object', None) self.assertEqual(code, 'NotImplemented') @s3acl def test_object_DELETE_error(self): code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPUnauthorized) self.assertEqual(code, 'SignatureDoesNotMatch') code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPForbidden) self.assertEqual(code, 'AccessDenied') code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPServerError) self.assertEqual(code, 'InternalError') code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPServiceUnavailable) self.assertEqual(code, 'InternalError') with patch('swift3.request.get_container_info', return_value={'status': 204}): code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPNotFound) self.assertEqual(code, 'NoSuchKey') with patch('swift3.request.get_container_info', return_value={'status': 404}): code = self._test_method_error('DELETE', '/bucket/object', swob.HTTPNotFound) self.assertEqual(code, 'NoSuchBucket') @s3acl @patch('swift3.cfg.CONF.allow_multipart_uploads', False) def test_object_DELETE_no_multipart(self): req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'DELETE'}, headers={'Authorization': 'AWS test:tester:hmac', 'Date': self.get_date_header()}) status, headers, body = self.call_swift3(req) self.assertEqual(status.split()[0], '204') self.assertNotIn(('HEAD', '/v1/AUTH_test/bucket/object'), self.swift.calls) self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'), self.swift.calls) _, path = self.swift.calls[-1] self.assertEqual(path.count('?'), 0) @s3acl def test_object_DELETE_multipart(self): req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'DELETE'}, headers={'Authorization': 'AWS test:tester:hmac', 'Date': self.get_date_header()}) status, headers, body = self.call_swift3(req) self.assertEqual(status.split()[0], '204') self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'), self.swift.calls) self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object'), self.swift.calls) _, path = self.swift.calls[-1] self.assertEqual(path.count('?'), 0) @s3acl def test_slo_object_DELETE(self): self.swift.register('HEAD', '/v1/AUTH_test/bucket/object', swob.HTTPOk, {'x-static-large-object': 'True'}, None) self.swift.register('DELETE', '/v1/AUTH_test/bucket/object', swob.HTTPOk, {}, '<SLO delete results>') req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': 'DELETE'}, headers={'Authorization': 'AWS test:tester:hmac', 'Date': self.get_date_header(), 'Content-Type': 'foo/bar'}) status, headers, body = self.call_swift3(req) self.assertEqual(status.split()[0], '204') self.assertEqual(body, '') self.assertIn(('HEAD', '/v1/AUTH_test/bucket/object'), self.swift.calls) self.assertIn(('DELETE', '/v1/AUTH_test/bucket/object' '?multipart-manifest=delete'), self.swift.calls) _, path, headers = self.swift.calls_with_headers[-1] path, query_string = path.split('?', 1) query = {} for q in query_string.split('&'): key, arg = q.split('=') query[key] = arg self.assertEqual(query['multipart-manifest'], 'delete') self.assertNotIn('Content-Type', headers) def _test_object_for_s3acl(self, method, account): req = Request.blank('/bucket/object', environ={'REQUEST_METHOD': method}, headers={'Authorization': 'AWS %s:hmac' % account, 'Date': self.get_date_header()}) return 
self.call_swift3(req) def _test_set_container_permission(self, account, permission): grants = [Grant(User(account), permission)] headers = \ encode_acl('container', ACL(Owner('test:tester', 'test:tester'), grants)) self.swift.register('HEAD', '/v1/AUTH_test/bucket', swob.HTTPNoContent, headers, None) @s3acl(s3acl_only=True) def test_object_GET_without_permission(self): status, headers, body = self._test_object_for_s3acl('GET', 'test:other') self.assertEqual(self._get_error_code(body), 'AccessDenied') @s3acl(s3acl_only=True) def test_object_GET_with_read_permission(self): status, headers, body = self._test_object_for_s3acl('GET', 'test:read') self.assertEqual(status.split()[0], '200') @s3acl(s3acl_only=True) def test_object_GET_with_fullcontrol_permission(self): status, headers, body = \ self._test_object_for_s3acl('GET', 'test:full_control') self.assertEqual(status.split()[0], '200') @s3acl(s3acl_only=True) def test_object_PUT_without_permission(self): status, headers, body = self._test_object_for_s3acl('PUT', 'test:other') self.assertEqual(self._get_error_code(body), 'AccessDenied') @s3acl(s3acl_only=True) def test_object_PUT_with_owner_permission(self): status, headers, body = self._test_object_for_s3acl('PUT', 'test:tester') self.assertEqual(status.split()[0], '200') @s3acl(s3acl_only=True) def test_object_PUT_with_write_permission(self): account = 'test:other' self._test_set_container_permission(account, 'WRITE') status, headers, body = self._test_object_for_s3acl('PUT', account) self.assertEqual(status.split()[0], '200') @s3acl(s3acl_only=True) def test_object_PUT_with_fullcontrol_permission(self): account = 'test:other' self._test_set_container_permission(account, 'FULL_CONTROL') status, headers, body = \ self._test_object_for_s3acl('PUT', account) self.assertEqual(status.split()[0], '200') @s3acl(s3acl_only=True) def test_object_DELETE_without_permission(self): account = 'test:other' status, headers, body = self.
# -*- coding: utf-8 -*- """ legi
t.helpers ~~~~~~~~~~~~~ Various Python helpers. """ import os import platform _platform = platform.system().lower() is_osx = (_platform == 'darwin') is_win = (_platform == 'windows') is_lin = (_platform == 'linux') def find_path_above(*names): """Attempt to locate given path by searching parent dirs.""" path = '.' while os.path.split(os.path.abspath(path))[1]: for name in names: joine
d = os.path.join(path, name) if os.path.exists(joined): return os.path.abspath(joined) path = os.path.join('..', path)
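# --- usage sketch (illustrative) ---
# find_path_above() walks upward from the working directory until one of the
# given names exists; the marker names below are only examples.  It returns
# the absolute path of the first match, or None once the filesystem root is
# reached without a hit.
if __name__ == '__main__':
    print(find_path_above('.git', 'setup.py'))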
import jso
n,os,shelve import asyncio,sys DATAFILENAME="data" def set_user_id(new_id): _local_data["user_id"]=new_id def set_login_token(token): _local_data["login_token"]=token def load_data(): global _local_data if(os.path.exists(os.path.join(get_current_path(),DATAFILENAME))): with open(os.path.join(get_current_path(),DATAFILENAME), 'r') as f: try: _local_data=json.loads(f.read()) except: _local_data={} else:_local_data={} def
save_data(): with open(os.path.join(get_current_path(),DATAFILENAME), 'w') as f: f.write(json.dumps(_local_data)) def get_user_id(): return _local_data.get("user_id") def get_login_token(): return _local_data.get("login_token") def get_template_path(): return os.path.join(get_current_path(),"templates") def get_current_path(): if getattr(sys, 'frozen', False): # we are running in a bundle f = sys.executable else: # we are running in a normal Python environment f = __file__ return os.path.dirname(os.path.abspath(f)) def get_client_version(): VERSIONFILE="client_version" with open(os.path.join(get_current_path(),VERSIONFILE), 'r') as f: return float(f.read().strip()) def get_sync_path(): return _local_data.get("sync_path",None) def set_sync_path(path): _local_data["sync_path"]=path record=None from contextlib import closing import aiohttp # $ pip install aiohttp download_semaphore = asyncio.Semaphore(5) async def download_file(url,path): chunk_size=1<<15 async with download_semaphore: with closing(aiohttp.ClientSession()) as session: filename = str(path) response = await session.get(url) with closing(response), open(filename, 'wb') as file: while True: # save file chunk = await response.content.read(chunk_size) if not chunk: break file.write(chunk) return filename upload_semaphore = asyncio.Semaphore(5) async def upload_file(url,data): async with upload_semaphore: with closing(aiohttp.ClientSession()) as session: return await session.post(url, data=data) import hashlib def file_md5(filename): h = hashlib.md5() with open(filename, 'rb', buffering=0) as f: for b in iter(lambda : f.read(128*1024), b''): h.update(b) return h.hexdigest()
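# --- usage sketch (illustrative) ---
# Typical call order for the persistence helpers above: load the JSON-backed
# state, read or update a field, then write it back.  The md5 helper streams
# the file in 128 KiB blocks, so it is safe on large files.
if __name__ == "__main__":
    load_data()
    print(get_user_id(), get_sync_path())
    save_data()
    # print(file_md5("some_existing_file"))  # hex digest; the path is hypothetical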
# -*- coding: utf-8 -*- # # Configuration file for the Sphinx documentation builder. # # This file does only contain a selection of the most common options. For a # full list see the documentation: # http://www.sphinx-doc.org/en/master/config # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # import os # import sys # sys.path.insert(0, os.path.abspath('.')) # -- Project information ----------------------------------------------------- project = 'msgiver' copyright = '2018, Tatsunori Nishikori' author = 'Tatsunori Nishikori' # The short X.Y version version = '0.1' # The full version, including alpha/beta/rc tags release = '0.1.7.1' # -- General configuration --------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.coverage', 'sphinx.ext.githubpages', ] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix(es) of source filenames. # You can specify multiple suffix as a list of string: # # source_suffix = ['.rst', '.md'] source_suffix = '.rst' # The master toctree document. master_doc = 'index' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # # This is also used if you do content translation via gettext catalogs. # Usually you set "language" from the command line for these cases. language = None # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path . exclude_patterns = ['build', 'Thumbs.db', '.DS_Store'] # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. # # html_theme_options = {} # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # Custom sidebar templates, must be a dictionary that maps document names # to template names. # # The default sidebars (for documents that don't match any pattern) are # defined by theme itself. Builtin themes are using these templates by # default: ``['localtoc.html', 'relations.html', 'sourcelink.html', # 'searchbox.html']``. # # html_sidebars = {} # -- Options for HTMLHelp output --------------------------------------------- # Output file base name for HTML help builder. htmlhelp_basename = 'msgiverdoc' # -- Options for LaTeX output ------------------------------------------------ latex_elements = { # The paper size ('letterpaper' or 'a4paper'). 
# # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # # 'preamble': '', # Latex figure (float) alignment # # 'figure_align': 'htbp', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, # author, documentclass [howto, manual, or own class]). latex_documents = [ (master_doc, 'msgiver.tex', 'msgiver Documentation', 'Tatsunori Nishikori', 'manual'), ] # -- Options for manual page output ------------------------------------------ # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ (master_doc, 'msgiver', 'msgiver Documentation', [author], 1) ] # -- Options for Texinfo output
---------------------------------------------- # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, # dir menu entry, description,
category) texinfo_documents = [ (master_doc, 'msgiver', 'msgiver Documentation', author, 'msgiver', 'One line description of project.', 'Miscellaneous'), ]
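# --- usage note (illustrative, not read by Sphinx) ---
# With this conf.py at the documentation root, the HTML pages are typically
# built from that directory with:
#
#   sphinx-build -b html . _build/html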
from __future__ import absolute_import, division,
print_function, unicode_literals from django.contrib.auth.decorators import user_passes_test from django_otp import user_has_device from django_otp.conf import settings def otp_required(view=None, redirect_field_name='next', login_url=None, if_configured=False): """ Similar to :func:`
~django.contrib.auth.decorators.login_required`, but requires the user to be :term:`verified`. By default, this redirects users to :setting:`OTP_LOGIN_URL`. :param if_configured: If ``True``, an authenticated user with no confirmed OTP devices will be allowed. Default is ``False``. :type if_configured: bool """ if login_url is None: login_url = settings.OTP_LOGIN_URL def test(user): return user.is_verified() or (if_configured and user.is_authenticated() and not user_has_device(user)) decorator = user_passes_test(test, login_url=login_url, redirect_field_name=redirect_field_name) return decorator if (view is None) else decorator(view)
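# --- usage sketch (illustrative; requires a configured Django project with
# django-otp installed, so it is shown as comments only) ---
#
#   from django.http import HttpResponse
#   from django_otp.decorators import otp_required
#
#   @otp_required
#   def secret(request):
#       return HttpResponse("only verified users get here")
#
#   @otp_required(if_configured=True)
#   def softer_secret(request):
#       return HttpResponse("verified users, or users without any OTP device")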
import common

__version__ = common.version
del common
import settings
#!/usr/bin/env python3 import os import logging from datetime import datetime from settings import JSONDIR from defs import load_data from san_env import get_apps debug_records_flag = False def save(appname, relations): apps = get_apps() for filename, modelname, filters in relations: records = load_data(os.path.join(JSONDIR, filename), []) model = apps.get_model(app_label=appname, model_name=modelname) if filters.get('before_delete_all'): model.objects.all().delete() elif filters.get('before_delete'): model.objects.filter(**filters['before_delete']).delete() if debug_records_flag is False: model.objects.bulk_create([model
(**record) for record in records]) else: for record in records: try:
model(**record).save() except: print('== {} =='.format(modelname)) for key in sorted(record.keys()): print(key, record[key] if key in record else '') print('\n') logging.info('--- file: %s -> model: %s | %s records' %( filename, modelname, len(records))) return
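# --- usage sketch (illustrative; app, model and file names are hypothetical) ---
# `relations` pairs a JSON dump located in JSONDIR with the Django model it is
# loaded into, plus options that control what gets deleted beforehand:
#
#   relations = [
#       ('cities.json', 'City', {'before_delete_all': True}),
#       ('streets.json', 'Street', {'before_delete': {'imported': True}}),
#   ]
#   save('geo', relations)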
totalF1 = 0 for r in range(nr): startRow, endRow = X.indptr[r], X.indptr[r+1] xInds = X.indices[startRow:endRow] xVals = X.data[startRow:endRow] rowLen = endRow - startRow scores = np.zeros(nl) for (ind, val) in zip(xInds, xVals): weightVals = wData[ind] weightInds = wRows[ind] scores[weightInds] += val * weightVals scores += b positives = scores > thresh startRow, endRow = y.indptr[r], y.indptr[r+1] yInds = y.indices[startRow:endRow] yVals = y.data[startRow:endRow] if useSqErr: scores[yInds] = yVals - scores[yInds] scores = np.clip(scores, 0, np.inf) scores[yInds] *= -1 loss += 0.5 * np.dot(scores, scores) else: pos = logistic.logcdf(scores) neg = logistic.logcdf(-scores) pos -= neg loss += (-pos[yInds].dot(yVals)-neg.sum()) tp = positives[yInds].sum() fn = (~positives)[yInds].sum() fp = positives.sum() - tp # tp + fp = p if tp > 0: totalF1 += (2.0 * tp) / (2.0 * tp + fn + fp) elif fn + fp == 0: totalF1 += 1 loss /= nr f1Arr = totalF1 / nr return loss, f1Arr # Get macro F1 and optionally output per-label F1 and label frequencies to file def getLossMacro(X, wRows, wData, b, y, outputFilename=""): nr,nc = X.shape assert y.shape == (nr,nl) assert wRows.size == wData.size == nc if useSqErr: thresh = 0.3 else: thresh = math.log(0.3 / 0.7) tp = np.zeros(nl, dtype="int") fp = np.zeros(nl, dtype="int") fn = np.zeros(nl, dtype="int") sZeros = 0 for r in range(nr): startRow, endRow = X.indptr[r], X.indptr[r+1] xInds = X.indices[startRow:endRow] xVals = X.data[startRow:endRow] rowLen = endRow - startRow scores = np.zeros(nl) for (ind, val) in zip(xInds, xVals): weightVals = wData[ind] weightInds = wRows[ind] scores[weightInds] += val * weightVals sZeros = (scores == 0).sum() scores += b positives = scores > thresh startRow, endRow = y.indptr[r], y.indptr[r+1] yVals = y.indices[startRow:endRow] truth = np.zeros(nl, dtype="bool") truth[yVals] = True tps = np.logical_and(truth, positives) tp[tps] += 1 fps = np.logical_and(~truth, positives) fp[fps] += 1 fns = np.logical_and(truth, ~positives) fn[fns] += 1 nonZeros = tp > 0 f1 = np.zeros(nl) f1[nonZeros] = (2.0 * tp[nonZeros]) / (2.0 * tp[nonZeros] + fp[nonZeros] + fn[nonZeros]) goodZeros = np.logical_and(tp == 0, np.logical_and(fp == 0, fn == 0)) f1[goodZeros] = 1 macroF1 = np.average(f1) if outputFilename != "": labFreq = y.sum(0).getA1() / nr with open(outputFilename, "w") as outputFile: for (freq, f1val) in zip(labFreq, f1): outputFile.write(str(freq) + "\t" + str(f1val) + "\n") return macroF1 # split a csr_matrix into two def split(indptr, indices, data, splitPoint): nc = indices.max() + 1 nr = indptr.size - 1 testIndptr = indptr[splitPoint:].copy() beginTestIdx = testIndptr[0] testIndices = indices[beginTestIdx:] testData = data[beginTestIdx:] testIndptr -= beginTestIdx indptr = indptr[:splitPoint+1] indices = indices[:beginTestIdx] data = data[:beginTestIdx] train = sp.csr_matrix((data, indices, indptr), (splitPoint, nc)) test = sp.csr_matrix((testData, testIndices, testIndptr), (nr - splitPoint, nc)) return train, test # read data formatted for bioASQ def makeBioASQData(dataFilename, testDataFilename, trainN, trainFrac, labelFrac, testN): assert 0 <= trainFrac <= 1 assert not ((testDataFilename == "") and (testN == 0)) if dataFilename.endswith(".gz"): datafile = gzi
p.open(dataFilename) else: datafile = open(dataFilename) nr = 0 numVals = 0 numLabVals = 0 keeperCounter = 0 featCounts = {} line_process_counter = 0 for line in datafi
le: line_process_counter += 1 if line_process_counter % 100 == 0: print "pass 1 of 4: " + str(line_process_counter) keeperCounter += trainFrac if keeperCounter < 1: continue else: keeperCounter -= 1 splitLine = line.split('\t') assert (len(splitLine) == 2) feats = set(splitLine[0].split(' ')) numVals += len(feats) for feat in feats: intFeat = int(feat) if intFeat in featCounts: featCounts[intFeat] += 1 else: featCounts[intFeat] = 1 numLabVals += splitLine[1].count(' ') + 1 nr += 1 if nr == trainN: break datafile.close() print "Made it past reading data file" Xdata = np.ndarray(numVals) Xindices = np.ndarray(numVals, dtype='int64') Xindptr = np.ndarray(nr+1, dtype="int64") Xindptr[0] = 0 Ydata = np.ndarray(numLabVals) Yindices = np.ndarray(numLabVals, dtype='int64') Yindptr = np.ndarray(nr+1, dtype="int64") Yindptr[0] = 0 insNum = 0 featIdx = 0 labIdx = 0 keeperCounter = 0 def addFeat(indices, data, idx, feat, count): indices[idx] = feat adjCount = featCounts[feat] - 0.5 #absolute discounting data[idx] = math.log1p(count) * math.log(float(nr) / adjCount) def addIns(splitFeats, idx, indices, data): intFeats = [] for strFeat in splitFeats: intFeats.append(int(strFeat)) intFeats.sort() startIdx = idx # add feats, using log(1+count) * log(nr/totalCount) as feature value count = 0 currFeat = -1 for feat in intFeats: if feat != currFeat: if currFeat in featCounts: addFeat(indices, data, idx, currFeat, count) idx +=1 count = 1 else: count += 1 currFeat = feat if currFeat in featCounts: addFeat(indices, data, idx, currFeat, count) idx += 1 # normalize to unit 2-norm xVec = data[startIdx:idx] xVec /= linalg.norm(xVec) return idx if dataFilename.endswith(".gz"): datafile = gzip.open(dataFilename) else: datafile = open(dataFilename) print "second datafile loop" second_line_counter = 0 for line in datafile: second_line_counter += 1 if second_line_counter % 100 == 0: print "pass 2 of 4: " + str(second_line_counter) keeperCounter += trainFrac if keeperCounter < 1: continue else: keeperCounter -= 1 splitLine = line.split('\t') assert (len(splitLine) == 2) # extract feats as integers and sort splitFeats = splitLine[0].split(' ') featIdx = addIns(splitFeats, featIdx, Xindices, Xdata) Xindptr[insNum+1] = featIdx # same stuff with labels (here there should be only 1 per line) splitLabels = splitLine[1].split(' ') intLabels = [] for strLab in splitLabels: intLabels.append(int(strLab)) intLabels.sort() numLabels = len(intLabels) endLabIdx = labIdx + numLabels Yindices[labIdx:endLabIdx] = intLabels Ydata[labIdx:endLabIdx] = np.ones(numLabels) Yindptr[insNum+1] = endLabIdx labIdx = endLabIdx insNum += 1 if insNum == trainN: break datafile.close() assert insNum == nr if testDataFilename != "": if testDataFilename.endswith(".gz"): datafile = gzip.open(testDa
#!/usr/bin/env python3 from anormbookmarker.test.test_enviroment import * with self_contained_session(CONFIG.database_timestamp) as session: BASE.metadata.create_all(session.bind) # make a tag to make an alias to aa = T
ag.construct(session=session, tag='a a') session.commit() db_result = [('select COUNT(*) from alias;', 0), ('sel
ect COUNT(*) from aliasword;', 0), ('select COUNT(*) from bookmark;', 0), ('select COUNT(*) from filename;', 0), ('select COUNT(*) from tag;', 1), ('select COUNT(*) from tag_relationship;', 0), ('select COUNT(*) from tagbookmarks;', 0), ('select COUNT(*) from tagword;', 2), ('select COUNT(*) from word;', 1), ('select COUNT(*) from wordmisspelling;', 0)] check_db_result(config=CONFIG, db_result=db_result)
eference k = li_boost_shared_ptr.Klass("me oh my") kret = li_boost_shared_ptr.reftest(k) val = kret.getValue() self.verifyValue("me oh my reftest", val) self.verifyCount(1, k) self.verifyCount(1, kret) # pass by pointer reference k = li_boost_shared_ptr.Klass("me oh my") kret = li_boost_shared_ptr.pointerreftest(k) val = kret.getValue() self.verifyValue("me oh my pointerreftest", val) self.verifyCount(1, k) self.verifyCount(1, kret) # null tests k = None if (li_boost_shared_ptr.smartpointertest(k) != None): raise RuntimeError("return was not null") if (li_boost_shared_ptr.smartpointerpointertest(k) != None): raise RuntimeError("return was not null") if (li_boost_shared_ptr.smartpointerreftest(k) != None): raise RuntimeError("return was not null") if (li_boost_shared_ptr.smartpointerpointerreftest(k) != None): raise RuntimeError("return was not null") if (li_boost_shared_ptr.nullsmartpointerpointertest(None) != "null pointer"): raise RuntimeError("not null smartpointer pointer") try: li_boost_shared_ptr.valuetest(k) raise RuntimeError("Failed to catch null pointer") except ValueError: pass if (li_boost_shared_ptr.pointertest(k) != None): raise RuntimeError("return was not null") try: li_boost_shared_ptr.reftest(k) raise RuntimeError("Failed to catch null pointer") except ValueError: pass # $owner k = li_boost_shared_ptr.pointerownertest() val = k.getValue() self.verifyValue("pointerownertest", val) self.verifyCount(1, k) k = li_boost_shared_ptr.smartpointerpointerownertest() val = k.getValue() self.verifyValue("smartpointerpointerownertest", val) self.verifyCount(1, k) # //////////////////////////////// Derived class //////////////////////////////////////// # derived pass by shared_ptr k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.derivedsmartptrtest(k) val = kret.getValue() self.verifyValue("me oh my derivedsmartptrtest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # derived pass by shared_ptr pointer k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.derivedsmartptrpointertest(k) val = kret.getValue() self.verifyValue("me oh my derivedsmartptrpointertest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # derived pass by shared_ptr ref k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.derivedsmartptrreftest(k) val = kret.getValue() self.verifyValue("me oh my derivedsmartptrreftest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # derived pass by shared_ptr pointer ref k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.derivedsmartptrpointerreftest(k) val = kret.getValue()
self.verifyValue("me oh my derivedsmartptrpointerreftest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # derived pass by pointer k = li_boost_shared_ptr.KlassDeriv
ed("me oh my") kret = li_boost_shared_ptr.derivedpointertest(k) val = kret.getValue() self.verifyValue("me oh my derivedpointertest-Derived", val) self.verifyCount(1, k) self.verifyCount(1, kret) # derived pass by ref k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.derivedreftest(k) val = kret.getValue() self.verifyValue("me oh my derivedreftest-Derived", val) self.verifyCount(1, k) self.verifyCount(1, kret) # //////////////////////////////// Derived and base class mixed //////////////////////////////////////// # pass by shared_ptr (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.smartpointertest(k) val = kret.getValue() self.verifyValue("me oh my smartpointertest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # pass by shared_ptr pointer (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.smartpointerpointertest(k) val = kret.getValue() self.verifyValue("me oh my smartpointerpointertest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # pass by shared_ptr reference (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.smartpointerreftest(k) val = kret.getValue() self.verifyValue("me oh my smartpointerreftest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # pass by shared_ptr pointer reference (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.smartpointerpointerreftest(k) val = kret.getValue() self.verifyValue("me oh my smartpointerpointerreftest-Derived", val) self.verifyCount(2, k) self.verifyCount(2, kret) # pass by value (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.valuetest(k) val = kret.getValue() self.verifyValue("me oh my valuetest", val) # note slicing self.verifyCount(1, k) self.verifyCount(1, kret) # pass by pointer (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.pointertest(k) val = kret.getValue() self.verifyValue("me oh my pointertest-Derived", val) self.verifyCount(1, k) self.verifyCount(1, kret) # pass by ref (mixed) k = li_boost_shared_ptr.KlassDerived("me oh my") kret = li_boost_shared_ptr.reftest(k) val = kret.getValue() self.verifyValue("me oh my reftest-Derived", val) self.verifyCount(1, k) self.verifyCount(1, kret) # //////////////////////////////// Overloading tests //////////////////////////////////////// # Base class k = li_boost_shared_ptr.Klass("me oh my") self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval") self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref") self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr") self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref") self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval") self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref") self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr") self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref") # Derived class k = li_boost_shared_ptr.KlassDerived("me oh my") self.verifyValue(li_boost_shared_ptr.overload_rawbyval(k), "rawbyval") self.verifyValue(li_boost_shared_ptr.overload_rawbyref(k), "rawbyref") self.verifyValue(li_boost_shared_ptr.overload_rawbyptr(k), "rawbyptr") self.verifyValue(li_boost_shared_ptr.overload_rawbyptrref(k), "rawbyptrref") self.verifyValue(li_boost_shared_ptr.overload_smartbyval(k), "smartbyval") 
self.verifyValue(li_boost_shared_ptr.overload_smartbyref(k), "smartbyref") self.verifyValue(li_boost_shared_ptr.overload_smartbyptr(k), "smartbyptr") self.verifyValue(li_boost_shared_ptr.overload_smartbyptrref(k), "smartbyptrref") # 3rd derived class k = li_boost_shared_ptr.Klass3rdDerived("me oh my") val = k.getValue() self.verifyValue("me oh my-3rdDerived", val) self.verifyCount(1, k) val = li_boost_shared_ptr.test3rdupcast(k) self.verifyValue("me oh my-3rdDerived", val) self.verifyCount(1, k) # //////////////////////////////// Member variables //////////////////////////////////////// # smart pointer by value m = li_boost_shared_ptr.MemberVariables() k = li_boost_shared_ptr.Klass("smart member value") m.SmartMemberValue = k val = k.getValue() self.verifyValue("smart member value", val) self.verifyCount(2, k) kmember = m.SmartMemberValue val = kmember.getValue() self.verifyValue("smart member value", val) self.verifyCount(3, kmember) self.verifyCount(3, k) del m self.verifyCount(2, kmember)
# -*- coding: utf-8 -*- # See LICENSE file for full copyright and licensing details. from odoo import api, fields, models class SaleAdvancePaymentInv(models.TransientModel): _inherit = "sale.advance.payment.inv" @api.model def _get_advance_payment(self): ctx = self.env.context.copy() if self._context.get('active_model') == 'hotel.folio': hotel_fol = self.env['hotel.folio'] hotel = hotel_fol.browse(self._context.get('active_ids', [])) ctx.update({'active_ids': [hotel.order_id.id], 'active_id': hotel.order_id.id}) return super(SaleAdvancePaymentInv, self.with_context(ctx))._get_advance_payment_method() advance_payment_method = fields.Selection([('delivered', 'Invoiceable lines'), ('all',
'Invoiceable lines\ (deduct down payments)'),
('percentage', 'Down payment (percentage)'), ('fixed', 'Down payment (fixed\ amount)')], string='What do you want\ to invoice?', default=_get_advance_payment, required=True) @api.multi def create_invoices(self): ctx = self.env.context.copy() if self._context.get('active_model') == 'hotel.folio': hotel_fol = self.env['hotel.folio'] hotel = hotel_fol.browse(self._context.get('active_ids', [])) ctx.update({'active_ids': [hotel.order_id.id], 'active_id': hotel.order_id.id, 'folio_id': hotel.id}) res = super(SaleAdvancePaymentInv, self.with_context(ctx)).create_invoices() return res
import time import json import random from flask import Flask, request, current_app, abort from functools import wraps from cloudbrain.utils.metadata_info import (map_metric_name_to_num_channels, get_supported_devices, get_metrics_names) from cloudbrain.settings import WEBSERVER_PORT _API_VERSION = "v1.0" app = Flask(__name__) app.config['PROPAGATE_EXCEPTIONS'] = True from cloudbrain.datastore.CassandraDAO import CassandraDAO dao = CassandraDAO() dao.connect() def support_jsonp(f): """Wraps JSONif
ied output for JSONP""" @wraps(f) def decorated_function(*args, **kwargs): callback = request.args.get('callback', False) if callback: content = str(callback) + '(' + str(f()) + ')' return current_app.response_class(content, mimetype='application/json') else: return f(*args, **kwargs) return decorated_function
@app.route('/data', methods=['GET']) @support_jsonp def data(): """ GET metric data :return: """ # return last 5 microseconds if start not specified. default_start_timestamp = int(time.time() * 1000000 - 5) device_id = request.args.get('device_id', None) device_name = request.args.get('device_name', None) metric = request.args.get('metric', None) start = int(request.args.get('start', default_start_timestamp)) if not device_name: return "missing param: device_name", 500 if not metric: return "missing param: metric", 500 if not device_id: return "missing param: device_id", 500 # data_records = _get_mock_data(device_name, metric) data_records = dao.get_data(device_name, device_id, metric, start) return json.dumps(data_records) def _get_mock_data(device_name, metric): metric_to_num_channels = map_metric_name_to_num_channels(device_name) num_channels = metric_to_num_channels[metric] now = int(time.time() * 1000000 - 5) # micro seconds data_records = [] for i in xrange(5): record = {'timestamp': now + i} for j in xrange(num_channels): channel_name = 'channel_%s' % j record[channel_name] = random.random() * 10 data_records.append(record) return data_records @app.route('/metadata/devices', methods=['GET']) @support_jsonp def get_device_names(): """ Returns the device names from the metadata file """ return json.dumps(get_supported_devices()) @app.route('/registered_devices', methods=['GET']) @support_jsonp def get_registered_devices(): """ Get the registered devices IDs """ registered_devices = dao.get_registered_devices() return json.dumps(registered_devices) """ Tags """ def _generate_mock_tags(user_id, tag_name): if tag_name is None: tag_names = ["Facebook", "Netflix", "TechCrunch"] else: tag_names = [tag_name] tags = [] for tag_name in tag_names: tags.append( {"tag_id": "c1f6e1f2-c964-48c0-8cdd-fafe8336190b", "user_id": user_id, "tag_name": tag_name, "metadata": {}, "start": int(time.time() * 1000) - 10, "end": int(time.time() * 1000) }) return tags def generate_mock_tag(user_id, tag_id): tag = {"tag_id": tag_id, "user_id": user_id, "tag_name": "label_1", "metadata": {}, "start": int(time.time() * 1000) - 10, "end": int(time.time() * 1000) } return tag @app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION, methods=['GET']) @support_jsonp def get_tags(user_id): """Retrieve all tags for a specific user """ tag_name = request.args.get('tag_name', None) #tags = _generate_mock_tags(user_id, tag_name) tags = dao.get_tags(user_id, tag_name) return json.dumps(tags), 200 @app.route('/api/%s/users/<string:user_id>/tags/<string:tag_id>' % _API_VERSION, methods=['GET']) @support_jsonp def get_tag(user_id, tag_id): """Retrieve a specific tag for a specific user """ #tag = dao.get_mock_tag(user_id, tag_id) tag = dao.get_tag(user_id, tag_id) return json.dumps(tag), 200 @app.route('/api/%s/users/<string:user_id>/tags' % _API_VERSION, methods=['POST']) @support_jsonp def create_tag(user_id): if (not request.json or not 'tag_name' in request.json or not 'start' in request.json): abort(400) tag_name = request.json.get("tag_name") metadata = request.json.get("metadata") start = request.json.get("start") end = request.json.get("end") #tag_id = "c1f6e1f2-c964-48c0-8cdd-fafe8336190b" tag_id = dao.create_tag(user_id, tag_name, metadata, start, end) return json.dumps({"tag_id": tag_id}), 500 """ Tag aggregates""" def _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics): aggregates = [] for metric in metrics: aggregates.append( { "aggregate_id": "c1f6e1f2-c964-48c0-8cdd-fafe83361977", 
"user_id": user_id, "tag_id": tag_id, "aggregate_type": "avg", "device_type": device_type, "aggregate_value": random.random() * 10, "metric": metric, "start": int(time.time() * 1000) - 10, "end": int(time.time() * 1000) }) return aggregates @app.route(('/api/%s/users/<string:user_id>/tags/<string:tag_id>/aggregates' % _API_VERSION), methods=['GET']) @support_jsonp def get_tag_aggregate(user_id, tag_id): """Retrieve all aggregates for a specific tag and user""" device_type = request.args.get('device_type', None) metrics = request.args.getlist('metrics', None) if device_type is None and len(metrics) == 0: device_types = get_supported_devices() for device_type in device_types: metrics.extend(get_metrics_names(device_type)) elif len(metrics) == 0 and device_type is not None: metrics = get_metrics_names(device_type) elif len(metrics) > 0 and device_type is None: return "parameter 'device_type' is required to filter on `metrics`", 500 #aggregates = _generate_mock_tag_aggregates(user_id, tag_id, device_type, metrics) aggregates = dao.get_aggregates(user_id, tag_id, device_type, metrics) return json.dumps(aggregates), 200 if __name__ == "__main__": app.run(host="0.0.0.0", port=WEBSERVER_PORT)
import sys import re # Copyright # ========= # Copyright (C) 2015 Trustwave Holdings, Inc. # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This pr
ogram is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/> # # # python cherryPicker.py [filename] by Eric Merritt 2015-04-09 # # =Synopsis # # This is a simple python scri
pt that decrypts the encoded config files # for Cherry Picker malware. It is encoded with a XOR string # # Input: filename or none to use the default kb852310.dll filename # # Example: python cherryPicker.py # # Example: python cherryPicker.py filename.dll # # Output: config.bin (decrypted config file) xor_key = ['0xE6', '0x96', '0x03', '0x00', '0x84', '0x03', '0x01', '0x32', '0x4D', '0x36', '0xD0', '0x35', '0x5F', '0x62', '0x65', '0x01'] def _ror(val, bits, bit_size): return ((val & (2 ** bit_size - 1)) >> bits % bit_size) | \ (val << (bit_size - (bits % bit_size)) & (2 ** bit_size - 1)) __ROR4__ = lambda val, bits: _ror(val, bits, 32) def DWORD(list, start): i = 0 result = '0x' while i < 4: if type(list[start + 3]) == int: result = result + format(list[start + 3], '02x') else: result = result + list[start + 3][2:] i = i + 1 start = start - 1 return result def replace_bytes(buffer, start, value): i = 4 indx = 0 value = re.findall('..', value.split('0x')[1]) while i > 0: buffer[start + indx] = int(value[i-1], 16) i = i - 1 indx = indx + 1 def round_dword(value): number = value.split('0x')[1] if len(number) > 8: number = number[len(number) - 8:len(number)] elif len(number) < 8: for i in range(0, 8-len(number)): number = '0' + number return '0x' + number def decrypt_config(buffer): counter = 2208 while(counter >= 0): v2 = 48 while v2: v4 = (v2 & 3) * 4 xor = int(DWORD(xor_key, v4), 16) op1 = int(DWORD(buffer, counter + 4 * ((v2 - 1) & 3)), 16) op1 = round_dword(hex(op1 * 2)) op2 = DWORD(buffer, counter + 4 * ((v2 + 1) & 3)) newval = int(op1, 16) ^ int(op2, 16) value = v2 ^ xor ^ newval result = __ROR4__(value, 8) v2 = v2 - 1 result = round_dword( hex((result * 9) ^ int(DWORD(buffer, counter + v4), 16))) result = round_dword(hex(xor ^ int(result, 16))) # Replace the buffer with the new value replace_bytes(buffer, counter + v4, result) counter = counter - 1 return buffer try: if len(sys.argv) != 1: f = open(sys.argv[1], 'rb') else: f = open('kb852310.dll', 'rb') except IOError as e: print e sys.exit(1) buff = [ord(i) for i in f.read()] decrypt_config(buff) g = open('config.bin', 'wb') g.write(bytearray(buff)) f.close() g.close()
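# --- worked example (illustrative) ---
# __ROR4__ is a plain 32-bit rotate-right; e.g. rotating 0x12345678 by 8 bits
# moves the low byte to the top:
#
#   hex(__ROR4__(0x12345678, 8))  ->  '0x78123456'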
from operator import itemgetter __author__ = 'davide' def pairwise(l): for t in zip(l, l[1:]): yield t def pijavskij(f, L, a, b, eps=1E-5): l = [(a, f(a)), (b, f(b))] while True: imin, Rmin, xmin = -1, float("inf"), -1 fo
r i, t in enumerate(pairwise(l)): (xi, fi), (xj, fj) = t R = (fi + fj - L * (xj - xi)) / 2 if R < Rmin: imin = i
Rmin = R xmin = (xi + xj) / 2 - (fj - fi) / (2 * L) if l[imin + 1][0] - l[imin][0] < eps: return l[imin], l[imin + 1] l.append((xmin, f(xmin))) l.sort(key=itemgetter(0)) print(l) if __name__ == "__main__": f = lambda x: x ** 4 t = pijavskij(f, 50, -100, 100, eps=1E-10) print(t)
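# --- note (illustrative) ---
# pijavskij() follows Piyavskii's global minimisation scheme for a function
# with Lipschitz constant L: each interval gets the lower bound
# R = (f(xi) + f(xj) - L*(xj - xi)) / 2, the interval with the smallest R is
# split at its predicted minimiser, and the search stops once that interval is
# narrower than eps.  A second quick check (|f'(x)| <= 2 on [-1, 1]):
#
#   print(pijavskij(lambda x: x * x, 2, -1, 1, eps=1E-8))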
ibrary build action act = SCons.Action.Action(BuildLibInstallAction, 'Install compiled library... $TARGET') bld = Builder(action = act) Env.Append(BUILDERS = {'BuildLib': bld}) # parse rtconfig.h to get used component PreProcessor = SCons.cpp.PreProcessor() f = file('rtconfig.h', 'r') contents = f.read() f.close() PreProcessor.process_contents(contents) BuildOptions = PreProcessor.cpp_namespace # add copy option AddOption('--copy', dest='copy', action='store_true', default=False, help='copy rt-thread directory to local.') AddOption('--copy-header', dest='copy-header', action='store_true', default=False, help='copy header of rt-thread directory to local.') AddOption('--cscope', dest='cscope', action='store_true', default=False, help='Build Cscope cross reference database. Requires cscope installed.') AddOption('--clang-analyzer', dest='clang-analyzer', action='store_true', default=False, help='Perform static analyze with Clang-analyzer. '+\ 'Requires Clang installed.\n'+\ 'It is recommended to use with scan-build like this:\n'+\ '`scan-build scons --clang-analyzer`\n'+\ 'If things goes well, scan-build will instruct you to invoke scan-view.') if GetOption('clang-analyzer'): # perform what scan-build does env.Replace( CC = 'ccc-analyzer', CXX = 'c++-analyzer', # skip as and link LINK = 'true', AS = 'true',) env["ENV"].update(x for x in os.environ.items() if x[0].startswith("CCC_")) # only check, don't compile. ccc-analyzer use CCC_CC as the CC. # fsyntax-only will give us some additional warning messages env['ENV']['CCC_CC'] = 'clang' env.Append(CFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding']) env['ENV']['CCC_CXX'] = 'clang++' env.Append(CXXFLAGS=['-fsyntax-only', '-Wall', '-Wno-invalid-source-encoding']) # remove the POST_ACTION as it will cause meaningless errors(file not # found or something like that). rtconfig.POST_ACTION = '' # add build library option AddOption('--buildlib', dest='buildlib', type='string', help='building library of a component') AddOption('--cleanlib', dest='cleanlib', action='store_true', default=False, help='clean up the library by --buildlib') # add target option AddOption('--target', dest='target', type='string', help='set target project: mdk/iar/vs/ua') #{target_name:(CROSS_TOOL, PLATFORM)} tgt_dict = {'mdk':('keil', 'armcc'), 'mdk4':('keil', 'armcc'), 'iar':('iar', 'iar'), 'vs':('msvc', 'cl'), 'vs2012':('msvc', 'cl'), 'cb':('keil', 'armcc'), 'ua':('keil', 'armcc')} tgt_name = GetOption('target') if tgt_name: # --target will change the toolchain settings which clang-analyzer is # depend on if GetOption('clang-analyzer'): print '--clang-analyzer cannot be used with --target' sys.exit(1) SetOption('no_exec', 1) try: rtconfig.CROSS_TOOL, rtconfig.PLATFORM = tgt_dict[tgt_name] except KeyError: print 'Unknow target: %s. Avaible targets: %s' % \ (tgt_name, ', '.join(tgt_dict.keys())) sys.exit(1) elif (GetDepend('RT_USING_NEWLIB') == False and GetDepend('RT_USING_NOLIBC') == False) \ and rtconfig.PLATFORM == 'gcc': AddDepend('RT_USING_MINILIBC') # add comstr option AddOption('--verbose', dest='verbose', action='store_true', default=False, help='print verbose information during build') if not GetOption('verbose'): # override the default verbose command string env.Replace( ARCOMSTR = 'AR $TARGET', ASCOMSTR = 'AS $TARGET', ASPPCOMSTR = 'AS $TARGET', CCCOMSTR = 'CC $TARGET', CXXCOMSTR = 'CXX $TARGET', LINKCOMSTR = 'LINK $TARGET' ) # we need to seperate the variant_dir for BSPs and the kernels. BSPs could # have their own components etc. 
If they point to the same folder, SCons # would find the wrong source code to compile. bsp_vdir = 'build/bsp' kernel_vdir = 'build/kernel' # board build script objs = SConscript('SConscript', variant_dir=bsp_vdir, duplicate=0) # include kernel objs.extend(SConscript(Rtt_Root + '/src/SConscript', variant_dir=kernel_vdir + '/src', duplicate=0)) # include libcpu if not has_libcpu: objs.extend(SConscript(Rtt_Root + '/libcpu/SConscript', variant_dir=kernel_vdir + '/libcpu', duplicate=0)) # include components objs.extend(SConscript(Rtt_Root + '/components/SConscript', variant_dir=kernel_vdir + '/components', duplicate=0, exports='remove_components')) return objs def PrepareModuleBuilding(env, root_directory): import rtconfig global Env global Rtt_Root Env = env Rtt_Root = root_directory # add build/clean library option for library checking AddOption('--buildlib', dest='buildlib', type='string', help='building library of a component') AddOption('--cleanlib', dest='cleanlib', action='store_true', default=False, help='clean up the library by --buildlib') # add program path env.PrependENVPath('PATH', rtconfig.EXEC_PATH) def GetConfigValue(name): assert type(name) == str, 'GetConfigValue: only string parameter is valid' try: return BuildOptions[name] except: return '' def GetDepend(depend): building = True if type(depend) == type('str'): if not BuildOptions.has_key(depend) or BuildOptions[depend] == 0: building = False elif BuildOptions[depend] != '': return BuildOptions[depend] return building # for list type depend for item in depend: if item != '': if not BuildOptions.has_key(item) or BuildOptions[item] == 0: building = False return building def AddDepend(option): BuildOptions[option] = 1 def MergeGroup(src_group, group): src_group['src'] = src_group['src'] + group['src'] if group.has_key('CCFLAGS'): if src_group.has_key('CCFLAGS'): src_group['CCFLAGS'] = src_group['CCFLAGS'] + group['CCFLAGS'] else: src_group['CCFLAGS'] = group['CCFLAGS'] if group.has_key('CPPPATH'):
if src_group.has_key('CPPPATH'): src_group['CPPPATH'] = src_group[
'CPPPATH'] + group['CPPPATH'] else: src_group['CPPPATH'] = group['CPPPATH'] if group.has_key('CPPDEFINES'): if src_group.has_key('CPPDEFINES'): src_group['CPPDEFINES'] = src_group['CPPDEFINES'] + group['CPPDEFINES'] else: src_group['CPPDEFINES'] = group['CPPDEFINES'] if group.has_key('LINKFLAGS'): if src_group.has_key('LINKFLAGS'): src_group['LINKFLAGS'] = src_group['LINKFLAGS'] + group['LINKFLAGS'] else: src_group['LINKFLAGS'] = group['LINKFLAGS'] if group.has_key('LIBS'): if src_group.has_key('LIBS'): src_group[
# Copyright (C) 2010-2011 Richard Lincoln # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to # deal in the Software without restriction, including without limitation the # rights to use, copy, modify, merge, publish, distribute, sublicense, and/or # sell copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONT
RACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE. from CIM15.IEC61970.Wires.Conductor import Conductor class DCLineSegment(Conductor): """A wire or combination of wires not insulated from one another, with consistent electrical characteristics, used to carry direct current between points in the DC region of the power system.A wire or combina
tion of wires not insulated from one another, with consistent electrical characteristics, used to carry direct current between points in the DC region of the power system. """ def __init__(self, dcSegmentInductance=0.0, dcSegmentResistance=0.0, *args, **kw_args): """Initialises a new 'DCLineSegment' instance. @param dcSegmentInductance: Inductance of the DC line segment. @param dcSegmentResistance: Resistance of the DC line segment. """ #: Inductance of the DC line segment. self.dcSegmentInductance = dcSegmentInductance #: Resistance of the DC line segment. self.dcSegmentResistance = dcSegmentResistance super(DCLineSegment, self).__init__(*args, **kw_args) _attrs = ["dcSegmentInductance", "dcSegmentResistance"] _attr_types = {"dcSegmentInductance": float, "dcSegmentResistance": float} _defaults = {"dcSegmentInductance": 0.0, "dcSegmentResistance": 0.0} _enums = {} _refs = [] _many_refs = []
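# --- usage sketch (added for illustration; values are hypothetical) ---
# Constructing a segment only requires the two electrical parameters declared
# above; everything else is inherited from Conductor.  The guard keeps this
# from running when the class is imported as part of the CIM15 package.
if __name__ == "__main__":
    seg = DCLineSegment(dcSegmentInductance=0.004, dcSegmentResistance=0.05)
    print("L = %s H, R = %s ohm" % (seg.dcSegmentInductance, seg.dcSegmentResistance))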
doLayout(sections16, sections32seg, sections32flat): # Determine 16bit positions textsections = getSectionsPrefix(sections16, '.text.') rod
atasections = (getSectionsPrefix(sections16, '.rodata.str1.1') + getSectionsPrefix(sections16, '.rodata.__func__.')) datasections = getSectionsPrefix(sections16, '.data16.') fixedsections = getSectionsPrefix(sections16, '.fixedaddr.') locs16fixed, firstfixed = fitSections(fixedsections, textsections) prunesections = [i[1] for i in locs16fixed] remsections = [i for
i in textsections+rodatasections+datasections if i not in prunesections] locs16, code16_start = getSectionsStart(remsections, firstfixed) locs16 = locs16 + locs16fixed locs16.sort() # Determine 32seg positions textsections = getSectionsPrefix(sections32seg, '.text.') rodatasections = (getSectionsPrefix(sections32seg, '.rodata.str1.1') + getSectionsPrefix(sections32seg, '.rodata.__func__.')) datasections = getSectionsPrefix(sections32seg, '.data32seg.') locs32seg, code32seg_start = getSectionsStart( textsections + rodatasections + datasections, code16_start) # Determine 32flat positions textsections = getSectionsPrefix(sections32flat, '.text.') rodatasections = getSectionsPrefix(sections32flat, '.rodata') datasections = getSectionsPrefix(sections32flat, '.data.') bsssections = getSectionsPrefix(sections32flat, '.bss.') locs32flat, code32flat_start = getSectionsStart( textsections + rodatasections + datasections + bsssections , code32seg_start + BUILD_BIOS_ADDR, 16) # Print statistics size16 = BUILD_BIOS_SIZE - code16_start size32seg = code16_start - code32seg_start size32flat = code32seg_start + BUILD_BIOS_ADDR - code32flat_start print "16bit size: %d" % size16 print "32bit segmented size: %d" % size32seg print "32bit flat size: %d" % size32flat return locs16, locs32seg, locs32flat ###################################################################### # Linker script output ###################################################################### # Write LD script includes for the given cross references def outXRefs(xrefs, finallocs, delta=0): out = "" for symbol, (fileid, section, addr) in xrefs.items(): if fileid < 2: addr += delta out += "%s = 0x%x ;\n" % (symbol, finallocs[(fileid, section)] + addr) return out # Write LD script includes for the given sections using relative offsets def outRelSections(locs, startsym): out = "" for addr, sectioninfo in locs: size, align, name = sectioninfo out += ". = ( 0x%x - %s ) ;\n" % (addr, startsym) if name == '.rodata.str1.1': out += "_rodata = . ;\n" out += "*(%s)\n" % (name,) return out # Layout the 32bit segmented code. This places the code as high as possible. def writeLinkerScripts(locs16, locs32seg, locs32flat , xref16, xref32seg, xref32flat , out16, out32seg, out32flat): # Index to final location for each section # finallocs[(fileid, section)] = addr finallocs = {} for fileid, locs in ((0, locs16), (1, locs32seg), (2, locs32flat)): for addr, sectioninfo in locs: finallocs[(fileid, sectioninfo[2])] = addr # Write 16bit linker script code16_start = locs16[0][0] output = open(out16, 'wb') output.write(COMMONHEADER + outXRefs(xref16, finallocs) + """ code16_start = 0x%x ; .text16 code16_start : { """ % (code16_start) + outRelSections(locs16, 'code16_start') + """ } """ + COMMONTRAILER) output.close() # Write 32seg linker script code32seg_start = code16_start if locs32seg: code32seg_start = locs32seg[0][0] output = open(out32seg, 'wb') output.write(COMMONHEADER + outXRefs(xref32seg, finallocs) + """ code32seg_start = 0x%x ; .text32seg code32seg_start : { """ % (code32seg_start) + outRelSections(locs32seg, 'code32seg_start') + """ } """ + COMMONTRAILER) output.close() # Write 32flat linker script output = open(out32flat, 'wb') output.write(COMMONHEADER + outXRefs(xref32flat, finallocs, BUILD_BIOS_ADDR) + """ code32flat_start = 0x%x ; .text code32flat_start : { """ % (locs32flat[0][0]) + outRelSections(locs32flat, 'code32flat_start') + """ . = ( 0x%x - code32flat_start ) ; *(.text32seg) . 
= ( 0x%x - code32flat_start ) ; *(.text16) code32flat_end = ABSOLUTE(.) ; } :text """ % (code32seg_start + BUILD_BIOS_ADDR, code16_start + BUILD_BIOS_ADDR) + COMMONTRAILER + """ ENTRY(post32) PHDRS { text PT_LOAD AT ( code32flat_start ) ; } """) output.close() ###################################################################### # Section garbage collection ###################################################################### # Find and keep the section associated with a symbol (if available). def keepsymbol(symbol, infos, pos, callerpos=None): addr, section = infos[pos][1].get(symbol, (None, None)) if section is None or '*' in section or section[:9] == '.discard.': return -1 if callerpos is not None and symbol not in infos[callerpos][4]: # This symbol reference is a cross section reference (an xref). # xref[symbol] = (fileid, section, addr) infos[callerpos][4][symbol] = (pos, section, addr) keepsection(section, infos, pos) return 0 # Note required section, and recursively set all referenced sections # as required. def keepsection(name, infos, pos=0): if name in infos[pos][3]: # Already kept - nothing to do. return infos[pos][3].append(name) relocs = infos[pos][2].get(name) if relocs is None: return # Keep all sections that this section points to for symbol in relocs: ret = keepsymbol(symbol, infos, pos) if not ret: continue # Not in primary sections - it may be a cross 16/32 reference ret = keepsymbol(symbol, infos, (pos+1)%3, pos) if not ret: continue ret = keepsymbol(symbol, infos, (pos+2)%3, pos) if not ret: continue # Return a list of kept sections. def getSectionsList(sections, names): return [i for i in sections if i[2] in names] # Determine which sections are actually referenced and need to be # placed into the output file. def gc(info16, info32seg, info32flat): # infos = ((sections, symbols, relocs, keep sections, xrefs), ...) infos = ((info16[0], info16[1], info16[2], [], {}), (info32seg[0], info32seg[1], info32seg[2], [], {}), (info32flat[0], info32flat[1], info32flat[2], [], {})) # Start by keeping sections that are globally visible. for size, align, section in info16[0]: if section[:11] == '.fixedaddr.' or '.export.' in section: keepsection(section, infos) keepsymbol('post32', infos, 0, 2) # Return sections found. keep16 = getSectionsList(info16[0], infos[0][3]), infos[0][4] keep32seg = getSectionsList(info32seg[0], infos[1][3]), infos[1][4] keep32flat = getSectionsList(info32flat[0], infos[2][3]), infos[2][4] return keep16, keep32seg, keep32flat ###################################################################### # Startup and input parsing ###################################################################### # Read in output from objdump def parseObjDump(file): # sections = [(size, align, section), ...] sections = [] # symbols[symbol] = (addr, section) symbols = {} # relocs[section] = [symbol, ...] relocs = {} state = None for line in file.readlines(): line = line.rstrip() if line == 'Sections:': state = 'section' continue if line == 'SYMBOL TABLE:': state = 'symbol'
# -*- coding: utf-8 -*-
"""
Created on Sun Apr 23 11:27:24 2017

@author: hd_mysky
"""
import pymongo as mongo
import pandas as pd
import os
from transform import harmonize_data

BASE_DIR = os.path.dirname(__file__)  # absolute path of the directory containing this file
file_path = os.path.join(BASE_DIR, 'dataset', 'source_simple.csv')

conn = mongo.MongoClient('mongodb://localhost:27017')
jobs = conn.lagou.jobs
# '技术' ("technology") is a stored field value in the source data, so it is kept as-is.
cursor = jobs.find({'positionTag': '技术'})
fields = ['workYear', 'education', 'city', 'positionTag', 'financeStage',
          'companySize', 'salaryAvg']
train = pd.DataFrame(list(cursor), columns=fields)
train_data = harmonize_data(train)
train_data.to_csv(file_path)
print('---------- data transformation finished ----------')
from __future__ impo
rt absolute_import import unittest import deviantart from .helpers import mock_response, optional from .api_credentials import CLIENT_ID, CLIENT_SECRET class ApiTest(unittest.TestCase): @optional(CLIENT_ID == "", mock_response('token')) def setUp(self): self.da = deviantart.Api(CLIENT_ID, CLIENT_SECRET) @optional(CLIENT_ID == "", mock_response('user_profile_devart')) def test_get_user(self): user = self.da.get_user("devart") self.assertEqual("devart
", user.username) self.assertEqual("devart", repr(user)) @optional(CLIENT_ID == "", mock_response('deviation')) def test_get_deviation(self): deviation = self.da.get_deviation("234546F5-C9D1-A9B1-D823-47C4E3D2DB95") self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", deviation.deviationid) self.assertEqual("234546F5-C9D1-A9B1-D823-47C4E3D2DB95", repr(deviation)) @optional(CLIENT_ID == "", mock_response('comments_siblings')) def test_get_comment(self): comments = self.da.get_comments("siblings", commentid="E99B1CEB-933F-B54D-ABC2-88FD0F66D421") comment = comments['thread'][0] self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", comment.commentid) self.assertEqual("E99B1CEB-933F-B54D-ABC2-88FD0F66D421", repr(comment))
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES O
R CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. def testFunction(request): return "PASS" import os # os.environ["FOO"] is only available
at runtime. print(os.environ["FOO"])
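# Hedged sketch (not part of the original fixture): the comment above notes that
# os.environ["FOO"] only exists at runtime, so a handler that must tolerate a
# missing variable at import or cold-start time can defer the lookup and fall
# back safely. The handler name and the "PASS" fallback are illustrative
# assumptions, not part of the original test.
def testFunctionEnvAtRuntime(request):
    return os.environ.get("FOO", "PASS")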
# -*- coding: utf-8 -*- """QGIS Unit tests for QgsImageCache. .. note:: This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. """ __author__ = '(C) 2018 by Nyall Dawson' __date__ = '02/10/2018' __copyright__ = 'Copyright 2018, The QGIS Project' import qgis # NOQA import os import socketserver import threading import http.server import time from qgis.PyQt.QtCore import QDir, QCoreApplication, QSize from qgis.PyQt.QtGui import QColor, QImage, QPainter from qgis.core import (QgsImageCache, QgsRenderChecker, QgsApplication, QgsMultiRenderChecker) from qgis.testing import start_app, unittest from utilities import unitTestDataPath start_app() TEST_DATA_DIR = unitTestDataPath() class SlowHTTPRequestHandler(http.server.SimpleHTTPRequestHandler): def do_GET(self): time.sleep(1) return http.server.SimpleHTTPRequestHandler.do_GET(self) class TestQgsImageCache(unittest.TestCase): @classmethod def setUpClass(cls): # Bring up a simple HTTP server, for remote SVG tests os.chdir(unitTestDataPath() + '') handler = SlowHTTPRequestHandler cls.httpd = socketserver.TCPServer(('localhost', 0), handler) cls.port = cls.httpd.server_address[1] cls.httpd_thread = threading.Thread(target=cls.httpd.serve_forever) cls.httpd_thread.setDaemon(True) cls.httpd_thread.start() def setUp(self): self.report = "<h1>Python QgsImageCache Tests</h1>\n" self.fetched = False QgsApplication.imageCache().remoteImageFetched.connect(self.imageFetched) def tearDown(self): report_file_path = "%s/qgistest.html
" % QDir.tempPath() with open(report_file_path, 'a') as report_file: report_file.write(self.report) def imageFetched(self):
self.fetched = True def waitForFetch(self): self.fetched = False while not self.fetched: QCoreApplication.processEvents() def testRemoteImage(self): """Test fetching remote image.""" url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port)) image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0) # first should be waiting image self.assertTrue(self.imageCheck('Remote Image', 'waiting_image', image)) self.assertFalse(QgsApplication.imageCache().originalSize(url).isValid()) self.waitForFetch() # second should be correct image image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0) self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image)) self.assertEqual(QgsApplication.imageCache().originalSize(url), QSize(511, 800), 1.0) def testRemoteImageMissing(self): """Test fetching remote image with bad url""" url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), 1.0, True) self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image)) def testRemoteImageBlocking(self): """Test fetching remote image.""" # remote not yet requested so not in cache url = 'http://localhost:{}/qgis_local_server/logo_2017.png'.format(str(TestQgsImageCache.port)) image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1) # first should be correct image self.assertTrue(self.imageCheck('Remote image sync', 'remote_image_blocking', image)) # remote probably in cache url = 'http://localhost:{}/qgis_local_server/sample_image.png'.format(str(TestQgsImageCache.port)) image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1) self.assertTrue(self.imageCheck('Remote Image', 'remote_image', image)) # remote probably in cache url = 'http://localhost:{}/qgis_local_server/xxx.png'.format(str(TestQgsImageCache.port)) # oooo naughty image, in_cache = QgsApplication.imageCache().pathAsImage(url, QSize(100, 100), True, 1.0, blocking=1) self.assertTrue(self.imageCheck('Remote image missing', 'waiting_image', image)) def imageCheck(self, name, reference_image, image): self.report += "<h2>Render {}</h2>\n".format(name) temp_dir = QDir.tempPath() + '/' file_name = temp_dir + 'image_' + name + ".png" output_image = QImage(image.size(), QImage.Format_RGB32) QgsMultiRenderChecker.drawBackground(output_image) painter = QPainter(output_image) painter.drawImage(0, 0, image) painter.end() output_image.save(file_name, "PNG") checker = QgsRenderChecker() checker.setControlPathPrefix("image_cache") checker.setControlName("expected_" + reference_image) checker.setRenderedImage(file_name) checker.setColorTolerance(2) result = checker.compareImages(name, 20) self.report += checker.report() print((self.report)) return result if __name__ == '__main__': unittest.main()
""" TwoDWalker.py is for controling the avatars in a 2D Scroller game environment. """ from GravityWalker import * from panda3d.core import ConfigVariableBool class TwoDWalker(GravityWalker): """ The TwoDWalker is primarily for a 2D Scroller game environment. Eg - Toon Blitz minigame. TODO: This class is still work in progress. Currently Toon Blitz is using this only for jumping. Moving the Toon left to right is handled by toontown/src/minigame/TwoDDrive.py. I eventually want this class to control all the 2 D movements, possibly with a customizable input list. """ notify = directNotify.newCategory("TwoDWalker") wantDebugIndicator = ConfigVariableBool('want-avatar-physics-indicator', False) wantFloorSphere = ConfigVariableBool('want-floor-sphere', False) earlyEventSphere = ConfigVariableBool('early-event-sphere', False) # special methods def __init__(self, gravity = -32.1740, standableGround=0.707, hardLandingForce=16.0): assert self.notify.debugStateCall(self) self.notify.debug('Constructing TwoDWalker') GravityWalker.__init__(self) def handleAvatarControls(self, task): """ Check on
the arrow keys and update the avatar. """ # get the button states: jump = inputState.isSet("forward") if self.lifter.isOnGround(): if self.isAirborne: self.isAirborne = 0 assert self.debugPrint("isAirborne
0 due to isOnGround() true") impact = self.lifter.getImpactVelocity() messenger.send("jumpLand") assert self.isAirborne == 0 self.priorParent = Vec3.zero() else: if self.isAirborne == 0: assert self.debugPrint("isAirborne 1 due to isOnGround() false") self.isAirborne = 1 return Task.cont def jumpPressed(self): """This function should be called from TwoDDrive when the jump key is pressed.""" if self.lifter.isOnGround(): if self.isAirborne == 0: if self.mayJump: # The jump button is down and we're close enough to the ground to jump. self.lifter.addVelocity(self.avatarControlJumpForce) messenger.send("jumpStart") self.isAirborne = 1 assert self.debugPrint("isAirborne 1 due to jump")
ForSpelling = str.maketrans({ 'ſ': 's', 'ffi': 'ffi', 'ffl': 'ffl', 'ff': 'ff', 'ſt': 'ft', 'fi': 'fi', 'fl': 'fl', 'st': 'st' }) def spellingNormalization (sWord): return unicodedata.normalize("NFC", sWord.translate(_xTransCharsForSpelling)) _xTransCharsForSimplification = str.maketrans({ 'à': 'a', 'é': 'e', 'î': 'i', 'ô': 'o', 'û': 'u', 'ÿ': 'i', "y": "i", 'â': 'a', 'è': 'e', 'ï': 'i', 'ö': 'o', 'ù': 'u', 'ŷ': 'i', 'ä': 'a', 'ê': 'e', 'í': 'i', 'ó': 'o', 'ü': 'u', 'ý': 'i', 'á': 'a', 'ë': 'e', 'ì': 'i', 'ò': 'o', 'ú': 'u', 'ỳ': 'i', 'ā': 'a', 'ē': 'e', 'ī': 'i', 'ō': 'o', 'ū': 'u', 'ȳ': 'i', 'ç': 'c', 'ñ': 'n', 'k': 'q', 'w': 'v', 'œ': 'oe', 'æ': 'ae', 'ſ': 's', 'ffi': 'ffi', 'ffl': 'ffl', 'ff': 'ff', 'ſt': 'ft', 'fi': 'fi', 'fl': 'fl', 'st': 'st', }) def simplifyWord (sWord): "word simplication before calculating distance between words" sWord = sWord.lower().translate(_xTransCharsForSimplification) sNewWord = "" for i, c in enumerate(sWord, 1): if c == 'e' or c != sWord[i:i+1]: # exception for <e> to avoid confusion between crée / créai sNewWord += c return sNewWord.replace("eau", "o").replace("au", "o").replace("ai", "e").replace("ei", "e").replace("ph", "f") aVowel = set("aáàâäāeéèêëēiíìîïīoóòôöōuúùûüūyýỳŷÿȳœæAÁÀÂÄĀEÉÈÊËĒIÍÌÎÏĪOÓÒÔÖŌUÚÙÛÜŪYÝỲŶŸȲŒÆ") aConsonant = set("bcçdfghjklmnñpqrstvwxzBCÇDFGHJKLMNÑPQRSTVWXZ") aDouble = set("bcdfjklmnprstzBCDFJKLMNPRSTZ") # letters that may be used twice successively # Similar chars d1to1 = { "1": "liîLIÎ", "2": "zZ", "3": "eéèêEÉÈÊ", "4": "aàâAÀÂ", "5": "sgSG", "6": "bdgBDG", "7": "ltLT", "8": "bB", "9": "gbdGBD", "0": "oôOÔ", "a": "aAàÀâÂáÁäÄāĀæÆ", "A": "AaÀàÂâÁáÄäĀ⯿", "à": "aAàÀâÂáÁäÄāĀæÆ", "À": "AaÀàÂâÁáÄäĀ⯿", "â": "aAàÀâÂáÁäÄāĀæÆ", "Â": "AaÀàÂâÁáÄäĀ⯿", "á": "aAàÀâÂáÁäÄāĀæÆ", "Á": "AaÀàÂâÁáÄäĀ⯿", "ä": "aAàÀâÂáÁäÄāĀæÆ", "Ä": "AaÀàÂâÁáÄäĀ⯿", "æ": "æÆéÉaA", "Æ": "ÆæÉéAa", "b": "bB", "B": "Bb", "c": "cCçÇsSkKqQśŚŝŜ", "C": "CcÇçSsKkQqŚśŜŝ", "ç": "cCçÇsSkKqQśŚŝŜ", "Ç": "CcÇçSsKkQqŚśŜŝ", "d": "dDðÐ", "D": "DdÐð", "e": "eEéÉèÈêÊëËēĒœŒ", "E": "EeÉéÈèÊêËëĒēŒœ", "é": "eEéÉèÈêÊëËēĒœŒ", "É": "EeÉéÈèÊêËëĒēŒœ", "ê": "eEéÉèÈêÊëËēĒœŒ", "Ê": "EeÉéÈèÊêËëĒēŒœ", "è": "eEéÉèÈêÊëËēĒœŒ", "È": "EeÉéÈèÊêËëĒēŒœ", "ë": "eEéÉèÈêÊëËēĒœŒ", "Ë": "EeÉéÈèÊêËëĒēŒœ", "f": "fF", "F": "Ff", "g": "gGjJĵĴ", "G": "GgJjĴĵ", "h": "hH", "H": "Hh", "i": "iIîÎïÏyYíÍìÌīĪÿŸ", "I": "IiÎîÏïYyÍíÌìĪīŸÿ", "î": "iIîÎïÏyYíÍìÌīĪÿŸ", "Î": "IiÎîÏïYyÍíÌìĪīŸÿ", "ï": "iIîÎïÏyYíÍìÌīĪÿŸ", "Ï": "IiÎîÏïYyÍíÌìĪīŸÿ", "í": "iIîÎïÏyYíÍìÌīĪÿŸ", "Í": "IiÎîÏïYyÍíÌìĪīŸÿ", "ì": "iIîÎïÏyYíÍìÌīĪÿŸ", "Ì": "IiÎîÏïYyÍíÌìĪīŸÿ", "j": "jJgGĵĴ", "J": "JjGgĴĵ", "k": "kKcCqQ", "K": "KkCcQq", "l": "lLłŁ", "L": "LlŁł", "m": "mMḿḾ", "M": "MmḾḿ", "n": "nNñÑńŃǹǸ", "N": "NnÑñŃńǸǹ", "o": "oOôÔóÓòÒöÖōŌœŒ", "O": "OoÔôÓóÒòÖöŌōŒœ", "ô": "oOôÔóÓòÒöÖōŌœŒ", "Ô": "OoÔôÓóÒòÖöŌōŒœ", "ó": "oOôÔóÓòÒöÖōŌœŒ", "Ó": "OoÔôÓóÒòÖöŌōŒœ", "ò": "oOôÔóÓòÒöÖōŌœŒ", "Ò": "OoÔôÓóÒòÖöŌōŒœ", "ö": "oOôÔóÓòÒöÖōŌœŒ", "Ö": "OoÔôÓóÒòÖöŌōŒœ", "œ": "œŒoOôÔeEéÉèÈêÊëË", "Œ": "ŒœOoÔôEeÉéÈèÊêËë", "p": "pPṕṔ", "P": "PpṔṕ", "q": "qQcCkK", "Q": "QqCcKk", "r": "rRŕŔ", "R": "RrŔŕ", "s": "sScCçÇśŚŝŜ", "S": "SsCcÇ猜Ŝŝ", "ś": "sScCçÇśŚŝŜ", "Ś": "SsCcÇ猜Ŝŝ", "ŝ": "sScCçÇśŚŝŜ", "Ŝ": "SsCcÇ猜Ŝŝ", "t": "tT", "T": "Tt", "u": "uUûÛùÙüÜúÚūŪ", "U": "UuÛûÙùÜüÚúŪū", "û": "uUûÛùÙüÜúÚūŪ", "Û": "UuÛûÙùÜüÚúŪū", "ù": "uUûÛùÙüÜúÚūŪ", "Ù": "UuÛûÙùÜüÚúŪū", "ü": "uUûÛùÙüÜúÚūŪ", "Ü": "UuÛûÙùÜüÚúŪū", "ú": "uUûÛùÙüÜúÚūŪ", "Ú": "UuÛûÙùÜüÚúŪū", "v": "vVwW", "V": "VvWw", "w": "wWvV", "W": "WwVv", "x": "xXcCkK", "X": "XxCcKk", "y": "yYiIîÎÿŸŷŶýÝỳỲȳȲ", "Y": "YyIiÎîŸÿŶŷÝýỲỳȲȳ", "ÿ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ", "Ÿ": 
"YyIiÎîŸÿŶŷÝýỲỳȲȳ", "ŷ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ", "Ŷ": "YyIiÎîŸÿŶŷÝýỲỳȲȳ", "ý": "yYiIîÎÿŸŷŶýÝỳỲȳȲ", "Ý": "YyIiÎîŸÿŶŷÝýỲỳȲȳ", "ỳ": "yYiIîÎÿŸŷŶýÝỳỲȳȲ", "Ỳ": "YyIiÎîŸÿŶŷÝýỲỳȲȳ", "z": "zZsSẑẐźŹ", "Z": "ZzSsẐẑŹź", } d1toX = { "æ": ("ae",), "Æ": ("AE",), "b": ("bb",), "B": ("BB",), "c": ("cc", "ss", "qu", "ch"), "C": ("CC", "SS", "QU", "CH"), "d": ("dd",), "D": ("DD",), "é": ("ai", "ei"), "É": ("AI", "EI"), "f": ("ff", "ph"), "F": ("FF", "PH"), "g": ("gu", "ge", "gg", "gh"), "G": ("GU", "GE", "GG", "GH"), "j": ("jj", "dj"), "J": ("JJ", "DJ"), "k": ("qu", "ck", "ch",
"cu", "kk", "kh"), "K": ("QU", "CK", "CH", "CU", "KK", "KH"), "l": ("ll",), "L": ("LL",), "m": ("mm", "mn"), "M": ("MM", "MN"), "n": ("nn", "nm", "mn"), "N": ("NN", "NM", "MN"), "o
": ("au", "eau"), "O": ("AU", "EAU"), "œ": ("oe", "eu"), "Œ": ("OE", "EU"), "p": ("pp", "ph"), "P": ("PP", "PH"), "q": ("qu", "ch", "cq", "ck", "kk"), "Q": ("QU", "CH", "CQ", "CK", "KK"), "r": ("rr",), "R": ("RR",), "s": ("ss", "sh"), "S": ("SS", "SH"), "t": ("tt", "th"), "T": ("TT", "TH"), "x": ("cc", "ct", "xx"), "X": ("CC", "CT", "XX"), "z": ("ss", "zh"), "Z": ("SS", "ZH"), } def get1toXReplacement (cPrev, cCur, cNext): if cCur in aConsonant and (cPrev in aConsonant or cNext in aConsonant): return () return d1toX.get(cCur, ()) d2toX = { "am": ("an", "en", "em"), "AM": ("AN", "EN", "EM"), "an": ("am", "en", "em"), "AN": ("AM", "EN", "EM"), "au": ("eau", "o", "ô"), "AU": ("EAU", "O", "Ô"), "em": ("an", "am", "en"), "EM": ("AN", "AM", "EN"), "en": ("an", "am", "em"), "EN": ("AN", "AM", "EM"), "ae": ("æ", "é"), "AE": ("Æ", "É"), "ai": ("ei", "é", "è", "ê", "ë"), "AI": ("EI", "É", "È", "Ê", "Ë"), "ei": ("ai", "é", "è", "ê", "ë"), "EI": ("AI", "É", "È", "Ê", "Ë"), "ch": ("sh", "c", "ss"), "CH": ("SH", "C", "SS"), "ct": ("x", "cc"), "CT": ("X", "CC"), "gg": ("gu",), "GG": ("GU",), "gu": ("gg",), "GU": ("GG",), "oa": ("oi",), "OA": ("OI",), "oe": ("œ",), "OE": ("Œ",), "oi": ("oa", "oie"), "OI": ("OA", "OIE"), "ph": ("f",), "PH": ("F",), "qu": ("q", "cq", "ck", "c", "k"), "QU": ("Q", "CQ", "CK", "C", "K"), "ss": ("c", "ç"), "SS": ("C", "Ç"), "un": ("ein",), "UN": ("EIN",), } # End of word dFinal1 = { "a": ("as", "at", "ant", "ah"), "A": ("AS", "AT", "ANT", "AH"), "c": ("ch",), "C": ("CH",), "e": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait", "ent", "eh"), "E": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT", "ENT", "EH"), "é": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"), "É": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"), "è": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"), "È": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"), "ê": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"), "Ê": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"), "ë": ("et", "er", "ets", "ée", "ez", "ai", "ais", "ait"), "Ë": ("ET", "ER", "ETS", "ÉE", "EZ", "AI", "AIS", "AIT"), "g": ("gh",), "G": ("GH",), "i": ("is", "it", "ie", "in"), "I": ("IS", "IT", "IE", "IN"), "n": ("nt", "nd", "ns", "nh"), "N": ("NT", "ND", "NS", "NH"), "o": ("aut", "ot", "os"), "O": ("AUT", "OT", "OS"), "ô": ("aut", "ot", "os"), "Ô": ("AUT", "OT", "OS"), "ö": ("aut", "ot", "os"), "Ö": ("AUT", "OT", "OS"), "p": ("ph",), "P": ("PH",), "s": ("sh",), "S": ("SH",), "t": ("th",), "T": ("TH",), "u": ("ut", "us", "uh"), "U": ("UT", "US", "UH"), } dFinal2 = { "ai": ("aient", "ais", "et"), "AI": ("AIENT", "AIS", "ET"), "an": ("ant", "ent"), "AN": ("ANT", "ENT"), "en": ("ent", "ant"), "EN": ("ENT", "ANT"), "ei": ("a
#! /usr/bin/python2 # vim: set fileencoding=utf-8 from dateutil.parser import parse from subprocess import check_output from shutil import copy import datetime import sys import os.path import isoweek DATE_FORMAT = '%Y%m%d' START = """\documentclass[a4paper,oneside,draft, notitlepage,11pt,svgnames]{scrreprt} \\newcommand{\workingDate}{\\today} \input{preambule} \\begin{document} """ END = """ \printbibliography{} \end{docu
ment}""" MD_ACTIVITY = """# Activity {.unnumbered} ~~~~ """ def create(date): filename = date.strftime(DATE_FORMAT) month = date.strftime('%B') day = date.strftime('%d') with open('template.tex', 'r') as t: content = t.read() content = content.replace('MONTH', month) content = content.replace('DAY', day) content = content.replace('content', filename+'.tex') with open('current.tex', 'w') as f: f.write(content) co
py('content.md', filename+'.md') print('gvim {}'.format(filename+'.md')) def week(date): week = isoweek.Week.withdate(date) name = 'w{}.tex'.format(week.week) together([week.day(d) for d in range(7)], name) def together(dates, name): include = '\chapter{{{}}}\n\input{{{}}}' res = [include.format(d.strftime('%B %d'), d.strftime(DATE_FORMAT)) for d in dates if os.path.exists(d.strftime(DATE_FORMAT)+'.tex')] with open(name, 'w') as f: f.write(START+'\n'.join(res)+END) print('mv {} w.tex'.format(name)) def log(date): cmd = "git whatchanged --since='{}' --pretty=format:'%B'" cmd += "|sed '/^$/d'|sed 's/^.*\.\.\. //'" since = date.replace(hour=4) log = check_output(cmd.format(str(since)), shell=True).strip()+"\n\n~~~~" log = MD_ACTIVITY + log print(log) return log.replace('\t', ' ') def since(date): today = datetime.datetime.now() name = date.strftime(DATE_FORMAT) + '_' + today.strftime(DATE_FORMAT) days = [(date + datetime.timedelta(days=i)).date() for i in range(1, (today-date).days+1)] together(days, name+'.tex') def finish(date): today = datetime.datetime.now() name = today.strftime(DATE_FORMAT) with open(name+'.md', 'a') as f: f.write(log(today)) cmd = 'pandoc -f markdown -t latex {}.md' cmd += " |grep -v addcontent|sed -e '/^\\\\sec/ s/\\\\label.*$//'" print(cmd.format(name)) latex = check_output(cmd.format(name), shell=True) with open(name+'.tex', 'w') as today_log: today_log.write(latex) print('latexmk -pdf -pvc current') print('mv current.pdf {}.pdf'.format(name)) if __name__ == '__main__': date = datetime.datetime.now() command = 'create' if len(sys.argv) > 1: command = sys.argv[1].strip() if len(sys.argv) > 2: date = parse(sys.argv[2], dayfirst=True) globals()[command](date)
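# Hedged usage note (not part of the original script): the __main__ block above
# dispatches the first CLI argument to a module-level function via
# globals()[command](date), defaulting to create(), with an optional day-first
# date as the second argument. Typical invocations therefore look like:
#   python <this script> create 23/04/2017
#   python <this script> week 23/04/2017
#   python <this script> finish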
# -*- coding: utf-8 -*- # Copyright (c) 2021 Ansible Project # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type import pytest from ansible.module_utils.common.arg_spec import ArgumentSpecValidator, ValidationResult from ansible.module_utils.errors import AnsibleValidationErrorMultiple from ansible.module_utils.six import PY2 # Each item is id, argument_spec, parameters, expected, unsupported parameters, error test string INVALID_SPECS = [ ( 'invalid-list', {'packages': {'type': 'list'}}, {'packages': {'key': 'value'}}, {'packages': {'key': 'value'}}, set(), "unable to convert to list: <class 'dict'> cannot be converted to a list", ), ( 'invalid-dic
t', {'users': {'type': 'dict'}}, {'users': ['one', 'two']}, {'users': ['one', 'two']}, set(), "unable to convert to dict: <class 'list'> cannot be converted to a dict", ), ( 'invalid-bool', {'bool': {'type': 'bool'}}, {'bool': {'k': 'v'}}, {'bool': {'k': 'v'}}, set(), "unable to convert to bool: <class 'dict'> cannot be converted to a bool", ), ( 'invalid-float', {'float': {'
type': 'float'}}, {'float': 'hello'}, {'float': 'hello'}, set(), "unable to convert to float: <class 'str'> cannot be converted to a float", ), ( 'invalid-bytes', {'bytes': {'type': 'bytes'}}, {'bytes': 'one'}, {'bytes': 'one'}, set(), "unable to convert to bytes: <class 'str'> cannot be converted to a Byte value", ), ( 'invalid-bits', {'bits': {'type': 'bits'}}, {'bits': 'one'}, {'bits': 'one'}, set(), "unable to convert to bits: <class 'str'> cannot be converted to a Bit value", ), ( 'invalid-jsonargs', {'some_json': {'type': 'jsonarg'}}, {'some_json': set()}, {'some_json': set()}, set(), "unable to convert to jsonarg: <class 'set'> cannot be converted to a json string", ), ( 'invalid-parameter', {'name': {}}, { 'badparam': '', 'another': '', }, { 'name': None, 'badparam': '', 'another': '', }, set(('another', 'badparam')), "another, badparam. Supported parameters include: name.", ), ( 'invalid-elements', {'numbers': {'type': 'list', 'elements': 'int'}}, {'numbers': [55, 33, 34, {'key': 'value'}]}, {'numbers': [55, 33, 34]}, set(), "Elements value for option 'numbers' is of type <class 'dict'> and we were unable to convert to int: <class 'dict'> cannot be converted to an int" ), ( 'required', {'req': {'required': True}}, {}, {'req': None}, set(), "missing required arguments: req" ) ] @pytest.mark.parametrize( ('arg_spec', 'parameters', 'expected', 'unsupported', 'error'), (i[1:] for i in INVALID_SPECS), ids=[i[0] for i in INVALID_SPECS] ) def test_invalid_spec(arg_spec, parameters, expected, unsupported, error): v = ArgumentSpecValidator(arg_spec) result = v.validate(parameters) with pytest.raises(AnsibleValidationErrorMultiple) as exc_info: raise result.errors if PY2: error = error.replace('class', 'type') assert isinstance(result, ValidationResult) assert error in exc_info.value.msg assert error in result.error_messages[0] assert result.unsupported_parameters == unsupported assert result.validated_parameters == expected
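# Hedged counterpart sketch (not part of the original test module): the suite
# above only exercises invalid specs, so this shows the happy path with the same
# ArgumentSpecValidator API. The spec and parameter values below are
# illustrative assumptions.
def test_valid_spec_sketch():
    v = ArgumentSpecValidator({'name': {'type': 'str'}})
    result = v.validate({'name': 'example'})
    assert isinstance(result, ValidationResult)
    assert not result.error_messages
    assert result.validated_parameters['name'] == 'example'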
# -*- coding: utf-8 -*- VSVersionInfo( ffi=FixedFileInfo( filevers=(4, 0, 0, 0), prodvers=(4, 0, 0, 0), mask=0x3f, flags=0x0, OS=0x4, fileType=0x1, subtype=0x0, date=(0, 0) ), kids=[ StringFileInfo( [ StringTable( '040904b0',
[StringStruct('CompanyName', u'CommandBrain'), StringStruct('FileDescription', u'Program for creating Use Case
diagrams'), StringStruct('FileVersion', '1.0'), StringStruct('LegalCopyright', u'CommandBrain'), ]) ]), VarFileInfo([VarStruct('Translation', [1033, 1200])]) ] )
# -*- coding: utf-8 -*- # Copyright: (c) 2018, SylvainCecchetto # GNU General Public License v2.0+ (see LICENSE.txt or https
://www.gnu.org/licenses/gpl-2.0.txt) # This file is part of Catch-up TV & More from __future__ import unicode_literals import re from codequick import Resolver import urlquick from resources.lib import resolver_proxy # TO DO # Add Replay URL_LIVE = 'https://www.paramountchannel.it/tv/diretta' @Resolver.register def get_live_url(plugin, item_id, **kwargs): resp = urlquick.get(
URL_LIVE, max_age=-1) video_uri = re.compile(r'uri\"\:\"(.*?)\"').findall(resp.text)[0] account_override = 'intl.mtvi.com' ep = 'be84d1a2' return resolver_proxy.get_mtvnservices_stream( plugin, video_uri, False, account_override, ep)
#!/usr/bin/env python """tvnamer - Automagical TV episode renamer Uses data from www.t
hetvdb.com (v
ia tvdb_api) to rename TV episode files from "some.show.name.s01e01.blah.avi" to "Some Show Name - [01x01] - The First.avi" """ __version__ = "3.0.0" __author__ = "dbr/Ben"
# Copyright (c) 2017-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the LICENSE file in # the root directory of this source tree. An additional grant of patent rights # can be found in the PATENTS file in the same directory. import math import torch import torch.optim from . import FairseqOptimizer, register_optimizer from apex.contrib.optimizers.fused_adam import FusedAdam from apex.contrib.optimizers.distributed_fused_adam import DistributedFusedAdam from apex.contrib.optimizers.distributed_fused_adam_v2 import DistributedFusedAdamV2 from apex.contrib.optimizers.distributed_fused_adam_v3 import DistributedFusedAdamV3 @register_optimizer('adam') class FairseqAdam(FairseqOptimizer): def __init__(self, args, params): super().__init__(args, params) if self.args.distributed_weight_update == 2: dwu_args = self.distributed_weight_update_config print("DistributedFusedAdam",dwu_args) self._optimizer = DistributedFusedAdam(params, **dwu_args, **self.optimizer_config) elif self.args.distributed_weight_update == 3: dwu_args = self.distributed_weight_update_config print("DistributedFusedAdamV2",dwu_args) self._optimizer = DistributedFusedAdamV2(params, **dwu_args, **self.optimizer_config) elif self.args.distributed_weight_update == 4: dwu_args = self.distributed_weight_update_config print("DistributedFusedAdamV3",dwu_args) self._optimizer = DistributedFusedAdamV3(params, **dwu_args, **self.optimizer_config) else: assert (self.args.distributed_weight_update == 0), "Vanilla optimizer not supported anymore" self._optimizer = FusedAdam(params, **self.optimizer_config) @staticmethod def add_args(parser): """Add optimizer-specific arguments to the parser.""" parser.add_argument('--adam-betas', default='(0.9, 0.999)', metavar='B', help='betas for Adam optimizer') parser.add_argument('--adam-eps', type=float, default=1e-8, metavar='D', help='epsilon for Adam optimizer') @property def optimizer_config(self): """ Return a kwarg dictionary that will be used to override optimizer args stored in checkpoints. This allows us to load a checkpoint and resume training using a different set of optimizer args, e.g., with a different learning rate. """ return { 'lr': self.args.lr[0], 'betas': eval(self.args.adam_betas), 'eps': self.args.adam_eps, 'weight_decay': self.args.weight_decay, } @property def distributed_weight_update_config(self): """ Return a kwarg dictionary that provides arguments for the distributed weight update feature. """ return { 'distributed_weight_update': self.args.distributed_weight_update, 'dwu_group_size': self.args.dwu_group_size, 'dwu_num_blocks': self.args.dwu_num_blocks, 'dwu_num_chunks': self.args.dwu_num_chunks, 'dwu_num_rs_pg': self.args.dwu_num_rs_pg, 'dwu_num_ar_pg': self.args.dwu_num_ar_pg, 'dwu_num_ag_pg': self.args.dwu_num_ag_pg, 'overlap_reductions': self.args.dwu_overlap_reductions, 'full_pipeline': self.args.dwu_full_pipeline, 'compute_L2_grad_norm': self.args.dwu_compute_L2_grad_norm, 'flat_mt': self.args.dwu_flat_mt, 'e5m2_allgather': self.args.dwu_e5m2_allgather, 'do_not_flatten_model': self.args.dwu_do_not_flatten_model, } class Adam(torch.optim.Optimizer): """Implements Adam algorithm. This implementation is modified from torch.optim.Adam based on: `Fixed Weight Decay Regularization in Adam` (see https://arxiv.org/abs/1711.05101) It has been proposed in `Adam: A Method for Stochastic Optimization`_. 
Arguments: params (iterable): iterable of parameters to optimize or dicts defining parameter groups lr (float, optional): learning rate (default: 1e-3) betas (Tuple[float, float], optional): coefficients used for computing running averages of gradient and its square (default: (0.9, 0.999)) eps (float, optional): term added to the denominator to improve numerical stability (default: 1e-8) weight_decay (float, optional): weight decay (L2 penalty) (default: 0) amsgrad (boolean, optional): whether to use the AMSGrad variant of this algorithm from the paper `On the Convergence of Adam and Beyond`_ .. _Adam\: A Method for Stochastic Optimization: https://arxiv.org/abs/1412.6980 .. _On the Convergence of Adam and Beyond: https://openreview.net/forum?id=ryQu7f-RZ """ def __init__(self, params, lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=0, amsgrad=False): defaults = dict(lr=lr, betas=betas, eps=eps, weight_decay=weight_decay, amsgrad=amsgrad) super(Adam, self).__init__(params, defaults) def step(self, closure=None): """Performs a single optimization step. Arguments: closure (callabl
e, optional): A closure that reevaluates the model and returns the loss. """ loss = None if closure is not None: loss = closure() for group in self.param_groups: for p in group['params']: if p.grad is None: continue grad = p.grad.data if grad.is_sparse: raise RuntimeError('Adam does not support sparse gradients, please con
sider SparseAdam instead') amsgrad = group['amsgrad'] state = self.state[p] # State initialization if len(state) == 0: state['step'] = 0 # Exponential moving average of gradient values state['exp_avg'] = torch.zeros_like(p.data) # Exponential moving average of squared gradient values state['exp_avg_sq'] = torch.zeros_like(p.data) if amsgrad: # Maintains max of all exp. moving avg. of sq. grad. values state['max_exp_avg_sq'] = torch.zeros_like(p.data) exp_avg, exp_avg_sq = state['exp_avg'], state['exp_avg_sq'] if amsgrad: max_exp_avg_sq = state['max_exp_avg_sq'] beta1, beta2 = group['betas'] state['step'] += 1 # Decay the first and second moment running average coefficient exp_avg.mul_(beta1).add_(1 - beta1, grad) exp_avg_sq.mul_(beta2).addcmul_(1 - beta2, grad, grad) if amsgrad: # Maintains the maximum of all 2nd moment running avg. till now torch.max(max_exp_avg_sq, exp_avg_sq, out=max_exp_avg_sq) # Use the max. for normalizing running avg. of gradient denom = max_exp_avg_sq.sqrt().add_(group['eps']) else: denom = exp_avg_sq.sqrt().add_(group['eps']) bias_correction1 = 1 - beta1 ** state['step'] bias_correction2 = 1 - beta2 ** state['step'] step_size = group['lr'] * math.sqrt(bias_correction2) / bias_correction1 if group['weight_decay'] != 0: p.data.add_(-group['weight_decay'] * group['lr'], p.data) p.data.addcdiv_(-step_size, exp_avg, denom) return loss
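# Hedged note (not part of the original file): the step above differs from
# torch.optim.Adam in how weight decay is applied. Instead of folding it into
# the gradient, it shrinks the parameters directly
# (p.data += -weight_decay * lr * p.data) right before the addcdiv_ update,
# which is the decoupled scheme from the weight-decay paper cited in the class
# docstring (the approach popularised as AdamW).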
import logging from typing import List import numpy as np import torch import torch.nn as nn from pinta.model.model_base import NN LOG = logging.getLogger("ConvRNN") class ConvRNN(NN): """ Combination of a convolutional front end and an RNN (GRU) layer below >> see https://gist.github.com/spro/c87cc706625b8a54e604fb1024106556 """ def __init__( self, logdir: str, input_size: int, hidden_size: int, kernel_sizes: List[int], n_gru_layers: int, output_size: int, filename=None, tuning_input_size: int = -1, ): super().__init__(logdir) # ---- # Define the model self.input_size = input_size self.hidden_size = hidden_size self.output_size = output_size self.gru_layers = n_gru_layers # Conv front end self.conv1 = nn.Conv1d(input_size, hidden_size, kernel_size=kernel_sizes[0]) self.conv2 = nn.Conv1d(hidden_size, hidden_size, kernel_size=kernel_sizes[1]) self.relu = nn.ReLU() # GRU / LSTM layers # Requires [batch, seq, inputs] self.gru = nn.GRU( hidden_size, h
idden_size, n_gru_layers, dropout=0.01, batch_first=True )
# Ends with a fully connected layer self.out = nn.Linear(hidden_size, self.output_size) # Load from trained NN if required if filename is not None: self._valid = self.load(filename) if self._valid: return LOG.warning("Could not load the specified net, computing it from scratch") def forward(self, inputs, hidden=None): # Run through Conv1d and Pool1d layers r1 = self.relu(self.conv1(inputs)) r2 = self.relu(self.conv2(r1)) # GRU/LSTM layer expects [batch, seq, inputs] r2 = r2.transpose(1, 2) output_rnn, hidden_out = self.gru(r2, hidden) output = self.out(output_rnn[:, -1, :].squeeze()) return output, hidden_out def get_layer_weights(self): return self.conv1.weight def _get_conv_out(self, shape): # Useful to compute the shape out of the conv blocks # (including eventual padding..) o = self.conv(torch.zeros(1, *shape)) return int(np.prod(o.size()))
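# Hedged shape walkthrough (not part of the original file): forward() expects
# inputs shaped [batch, input_size, seq_len]. Each unpadded Conv1d shortens the
# sequence by (kernel_size - 1) while producing hidden_size channels, the
# transpose(1, 2) yields the [batch, seq, hidden_size] layout the GRU was built
# for (batch_first=True), and only the last time step of the GRU output feeds
# the final Linear layer of size output_size.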
from
.utils
import Impl
import os import threading import Queue # Windows import import win32file import win32pipe import win32api import win32con import win32security import win32process import win32event class Win32Spawn(object): def __init__(self, cmd, shell=False): self.queue = Queue.Queue() self.is_terminated = False self.wake_up_event = win32event.CreateEvent(None, 0, 0, None) exec_dir = os.getcwd() comspec = os.environ.get("COMSPEC", "cmd.exe") cmd = comspec + ' /c ' + cmd win32event.ResetEvent(self.wake_up_event) currproc = win32api.GetCurrentProcess() sa = win32security.SECURITY_ATTRIBUTES() sa.bInheritHandle = 1 child_stdout_rd, child_stdout_wr = win32pipe.CreatePipe(sa, 0) child_stdout_rd_dup = win32api.DuplicateHandle(currproc, child_stdout_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS) win32file.CloseHandle(child_stdout_rd) child_stderr_rd, child_stderr_wr = win32pipe.CreatePipe(sa, 0) child_stderr_rd_dup = win32api.DuplicateHandle(currproc, child_stderr_rd, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS) win32file.CloseHandle(child_stderr_rd) child_stdin_rd, child_stdin_wr = win32pipe.CreatePipe(sa, 0) child_stdin_wr_dup = win32api.DuplicateHandle(currproc, child_stdin_wr, currproc, 0, 0, win32con.DUPLICATE_SAME_ACCESS) win32file.CloseHandle(child_stdin_wr) startup_info = win32process.STARTUPINFO() startup_info.hStdInput = child_stdin_rd startup_info.hStdOutput = child_stdout_wr startup_info.hStdError = child_stderr_wr startup_info.dwFlags = win32process.STARTF_USESTDHANDLES cr_flags = 0 cr_flags = win32process.CREATE_NEW_PROCESS_GROUP env = os.environ.copy() self.h_process, h_thread, dw_pid, dw_tid = win32process.CreateProcess(None, cmd, None, None, 1, cr_flags, env, os.path.abspath(exec_dir), startup_info) win32api.CloseHandle(h_thread) win32file.CloseHandle(child_stdin_rd) win32file.CloseHandle(child_stdout_wr) win32file.CloseHandle(child_stderr_wr) self.__child_stdout = child_stdout_rd_dup self.__child_stderr = child_stderr_rd_dup self.__child_stdin = child_stdin_wr_dup self.exit_code = -1 def close(self): win32file.CloseHandle(self.__child_stdout) win32file.CloseHandle(self.__child_stderr) win32file.CloseHandle(self.__child_stdin) win32api.CloseHandle(self.h_process) win32api.CloseHandle(self.wake_up_event) def kill_subprocess(): win32event.SetEvent(self.wake_up_event) def sleep(secs): win32event.ResetEvent(self.wake_up_event) timeout = int(1000 * secs) val = win32event.WaitForSingleObject(self.wake_up_event, timeout) if val == win32event.WAIT_TIMEOUT: return True else: # The wake_up_event must have been signalled return False def get(self, block=True, timeout=None): return self.queue.get(block=block, timeout=timeout) def qsize(self): return self.queue.qsize() def __wait_for_child(self): # kick off threads to read from stdout and stderr of the child process threading.Thread(target=self.__do_read, args=(self.__child_stdout, )).start() threading.Thread(target=self.__do_read, args=(self.__child_stderr, )).start() while True: # block waiting for the process to finish or the interrupt to happen handles = (self.wake_up_event, self.h_process) val = win32event.WaitForMultipleObjects(handles, 0, win32event.INFINITE) if val >= win32event.WAIT_OBJECT_0 and val < win32event.WAIT_OBJECT_0 + len(handles): handle = handles[val - win32event.WAIT_OBJECT_0] if handle == self.wake_up_event: win32api.TerminateProcess(self.h_process, 1) win32event.ResetEvent(self.wake_up_event) return False elif handle == self.h_process: # the process has ended naturally return True else: assert False, "Unknown handle fired" 
else: assert False, "Unexpected return from WaitForMultipleObjects" # Wait for job to finish. Since this method blocks, it can to be called from another thread. # If the application wants to kill the process, it should call kill_subprocess(). def wait(self): if not self.__wait_for_child(): # it's been killed result = False else: # normal termination self.exit_code = win32process.GetExitCodeProcess(self.h_process) result = self.exit_code == 0 self.close() self.is_terminated = True return result # This method gets called on a worker thread to read from either a stderr # or stdout thread from the child process. def __do_read(self, handle): bytesToRead = 1024 while 1: try: finished = 0 hr, data =
win32file.ReadFile(handle, bytesToRead, None) if data: self.queue.put_nowait(data) except win32api.error: finished = 1 if finished: return def start_pipe(self): def worker(pipe): return pipe.w
ait() thrd = threading.Thread(target=worker, args=(self, )) thrd.start()
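# Hedged note (not part of the original module): wait() blocks until either the
# child process handle or wake_up_event is signalled, so callers typically run
# it on a helper thread via start_pipe() and call kill_subprocess() from another
# thread to request termination. The two __do_read threads drain the child's
# stdout/stderr pipes into self.queue, which get() and qsize() expose to the
# caller.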
# -*- coding: utf-8 -*- import pytest from thefuck.shells.tcsh import Tcsh @pytest
.mark.usefixtures('isfile', 'no_memoize', 'no_cache') class TestTcsh(object): @pytest.fixture def shell(self): return Tcsh() @pytest.fixture(autouse=True) def Popen(self, mocker): mock = mock
er.patch('thefuck.shells.tcsh.Popen') mock.return_value.stdout.read.return_value = ( b'fuck\teval $(thefuck $(fc -ln -1))\n' b'l\tls -CF\n' b'la\tls -A\n' b'll\tls -alF') return mock @pytest.mark.parametrize('before, after', [ ('pwd', 'pwd'), ('fuck', 'eval $(thefuck $(fc -ln -1))'), ('awk', 'awk'), ('ll', 'ls -alF')]) def test_from_shell(self, before, after, shell): assert shell.from_shell(before) == after def test_to_shell(self, shell): assert shell.to_shell('pwd') == 'pwd' def test_and_(self, shell): assert shell.and_('ls', 'cd') == 'ls && cd' def test_or_(self, shell): assert shell.or_('ls', 'cd') == 'ls || cd' def test_get_aliases(self, shell): assert shell.get_aliases() == {'fuck': 'eval $(thefuck $(fc -ln -1))', 'l': 'ls -CF', 'la': 'ls -A', 'll': 'ls -alF'} def test_app_alias(self, shell): assert 'setenv TF_SHELL tcsh' in shell.app_alias('fuck') assert 'alias fuck' in shell.app_alias('fuck') assert 'alias FUCK' in shell.app_alias('FUCK') assert 'thefuck' in shell.app_alias('fuck') def test_get_history(self, history_lines, shell): history_lines(['ls', 'rm']) assert list(shell.get_history()) == ['ls', 'rm'] def test_how_to_configure(self, shell, config_exists): config_exists.return_value = True assert shell.how_to_configure().can_configure_automatically def test_how_to_configure_when_config_not_found(self, shell, config_exists): config_exists.return_value = False assert not shell.how_to_configure().can_configure_automatically
# coding=utf-8 # Copyright 2022 The ML Fairness Gym Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Classes for building distributions.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from absl import logging import attr import numpy as np from typing import Sequence @attr.s class Distribution(object): """Base distribution class. Inheriting classes should fill in the sample method and initialize dim. """ dim = attr.ib(init=False) def sample(self, rng): raise NotImplementedError def _check_sum_to_one(instance, attribute, value):
"""Raises ValueError if the value does not sum to one."""
del instance, attribute # Unused. value = np.array(value) if not np.isclose(np.sum(value), 1): raise ValueError("Array must sum to one. Got %s." % np.sum(value)) def _check_nonnegative(instance, attribute, value): """Raises ValueError if the value elements are negative.""" del instance, attribute # Unused. value = np.array(value) if np.any(value < 0): raise ValueError("Array must be nonnegative. Got %s." % value) def _check_in_zero_one_range(instance, attribute, value): """Raises ValueError if value is not in [0, 1].""" del instance, attribute # Unused. value = np.array(value) if np.any(value < 0) or np.any(value > 1): raise ValueError("Value must be in [0, 1]. Got %s." % value) @attr.s class Mixture(Distribution): """A mixture distribution.""" components = attr.ib(factory=list) # type: Sequence[Distribution] weights = attr.ib( factory=list, validator=[_check_sum_to_one, _check_nonnegative]) # type: Sequence[float] def sample(self, rng): logging.debug("Sampling from a mixture with %d components. Weights: %s", len(self.components), self.weights) component = rng.choice(self.components, p=self.weights) return component.sample(rng) def __attrs_post_init__(self): for component in self.components: if component.dim != self.components[0].dim: raise ValueError("Components do not have the same dimensionality.") self.dim = self.components[0].dim @attr.s class Gaussian(Distribution): """A Gaussian Distribution.""" mean = attr.ib() std = attr.ib() def __attrs_post_init__(self): self.dim = len(self.mean) def sample(self, rng): return rng.normal(self.mean, self.std) @attr.s class Bernoulli(Distribution): """A Bernoulli Distribution.""" p = attr.ib(validator=[_check_in_zero_one_range]) def __attrs_post_init__(self): self.dim = 1 def sample(self, rng): return rng.rand() < self.p @attr.s class Constant(Distribution): """A Constant Distribution.""" mean = attr.ib() def __attrs_post_init__(self): self.dim = len(self.mean) def sample(self, rng): del rng # Unused. return self.mean
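# Hedged usage sketch (not part of the original module): sampling from a
# two-component Gaussian mixture with a numpy RandomState, which provides the
# rng.choice / rng.normal calls the classes above rely on. The means, stds and
# weights below are illustrative assumptions.
def _mixture_usage_sketch():
    rng = np.random.RandomState(0)
    mixture = Mixture(
        components=[Gaussian(mean=[0.0, 0.0], std=1.0),
                    Gaussian(mean=[5.0, 5.0], std=0.5)],
        weights=[0.25, 0.75])
    return [mixture.sample(rng) for _ in range(3)]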
from collections import defaultdict class Solution(object): def removeBoxes(self, boxes): """ :type boxes: List[int] :rtype: int """ unq, cnt = [], [] for b in boxes: if not unq or b != unq[-1]: unq.append(b) cnt.append(1) else: cnt[-1] += 1 n = len(unq) dp = [[0] * i for i in range(1, n + 1)] # [i][j] from j to i max pre = defaultdict(list) for i, b in enu
merate(unq): pre[b].append(i) dp[i][i] = cnt[i] ** 2 for j in range(i - 1, -1, -1): theMax = dp[i - 1][j] + cnt[i] ** 2 npre = len(pre[b]) if unq[j] != unq[i] else len(pre[b]) - 1 for kk in range(npre - 1, -1, -1): k = pre[b][kk] if k > j: theMax = max(theMax, dp[i][
k] + dp[k - 1][j]) else: break if unq[j] == unq[i]: poss = pre[b][kk:] nposs = len(poss) span = [] for p in range(nposs - 1): span.append(dp[poss[p + 1] - 1][poss[p] + 1]) total = sum(span) count_k = [cnt[p] for p in poss] total_k = sum(count_k) theMax = max(theMax, total + total_k ** 2) left_k = 0 for ki in range(nposs - 2): left_k += count_k[ki] right_k = total_k - left_k left_right = total - span[ki] for kj in range(ki + 2, nposs): left_right -= span[kj - 1] right_k -= count_k[kj - 1] theMax = max(theMax, dp[poss[kj] - 1][poss[ki] + 1] \ + left_right + (left_k + right_k) ** 2) dp[i][j] = theMax return dp[-1][0] # # TLE 20/60 # @memo # def dfs(*boxes): # if not boxes: # return 0 # dct = defaultdict(list) # pre = 0 # for i, b in enumerate(boxes): # if i == 0 or b != boxes[i - 1]: # dct[b].append([i, i+1]) # pre = i # else: # dct[b][-1][1] += 1 # idx, to_remove = set(), set() # ret = 0 # for k, v in dct.items(): # if len(v) == 1: # to_remove.add(k) # lo, hi = v[0] # idx.update(range(lo, hi)) # ret += (hi - lo) ** 2 # if ret: # return ret + dfs( # *(boxes[i] for i in range(len(boxes)) if i not in idx)) # for k, vs in dct.items(): # for lo, hi in vs: # ret = max( # ret, (hi - lo) ** 2 + dfs(*(boxes[:lo] + boxes[hi:]))) # return ret # return dfs(*boxes) # # TLE 20/60 # n = len(boxes) # first_value, last_first = {}, {} # dct = defaultdict(dict) # {val: {lo: [hi, step]}} # pre = 0 # for i, b in enumerate(boxes): # if i == 0 or b != boxes[i - 1]: # first_value[i] = b # dct[b][i] = [i+1, 1] # last_first[i] = pre # pre = i # else: # dct[b][pre][0] += 1 # dct[b][pre][1] += 1 # def remove(k, lo, dct, first_value, last_first): # hi, count = dct[k][lo] # lolo = last_first[lo] # if hi != n: # val = first_value[hi] # change = dct[val] # if lo != 0 and first_value[lolo] == val: # change[lolo][0] = change[hi][0] # change[lolo][1] += change[hi][1] # last_first[change[hi][0]] = lolo # else: # change[lo] = change[hi] # last_first[change[hi][0]] = lo # first_value[lo] = val # change.pop(hi) # elif lo != 0: # dct[first_value[lolo]][lolo][0] = hi # dct[k].pop(lo) # return count ** 2 # def dfs(dct, first_value, last_first, result): # while dct: # to_remove = [] # for k, v in dct.items(): # if len(v) == 1: # to_remove.append(k) # lo = next(iter(v.keys())) # result += remove(k, lo, dct, first_value, last_first) # if to_remove: # for k in to_remove: # dct.pop(k) # else: # break # r = result # for k, v in dct.items(): # for lo in v: # if lo == 0 or v[lo][0] == n: # continue # dct2 = deepcopy(dct) # first_value2 = first_value.copy() # last_first2 = last_first.copy() # count = remove(k, lo, dct2, first_value2, last_first2) # #dct2[k].pop(lo) # r = max( # r, dfs(dct2, first_value2, last_first2, result+count)) # return r # return dfs(dct, first_value, last_first, 0) assert Solution().removeBoxes([1, 3, 2, 2, 2, 3, 4, 3, 1]) == 23 assert Solution().removeBoxes([1, 3, 2, 2, 2, 3, 4, 2, 3, 1]) == 26 assert Solution().removeBoxes([8, 1, 2, 10, 8, 5, 1, 10, 8, 4]) == 16 print(Solution().removeBoxes([3, 8, 8, 5, 5, 3, 9, 2, 4, 4, 6, 5, 8, 4, 8, 6, 9, 6, 2, 8, 6, 4, 1, 9, 5, 3, 10, 5, 3, 3, 9, 8, 8, 6, 5, 3, 7, 4, 9, 6, 3, 9, 4, 3, 5, 10, 7, 6, 10, 7]))
# -*- encoding: utf-8 -*- ########################################################################### # Module Writen to OpenERP, Open Source Management Solution # # Copyright (c) 2013 Vauxoo - http://www.vauxoo.com/ # All Rights Reserved. # info Vauxoo (info@vauxoo.com) ############################################################################ # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Affero General Public License as # published by the Free Software Foundation, either version 3 of the # License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Affero Ge
neral Public License for more details. # # You should have rec
eived a copy of the GNU Affero General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # ############################################################################## from openerp.osv import osv, fields class res_partner(osv.Model): _inherit = 'res.partner' _order = "parent_left" _parent_order = "ref" _parent_store = True _columns = { 'parent_right': fields.integer('Parent Right', select=1), 'parent_left': fields.integer('Parent Left', select=1), }
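# Hedged note (not part of the original module): setting _parent_store = True
# with the parent_left / parent_right columns above makes OpenERP maintain a
# nested-set index over the partner hierarchy, so child_of searches become
# simple range comparisons instead of recursive queries; _parent_order = "ref"
# controls how siblings are ordered when those bounds are (re)computed.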
s is None: self.transportModel = self.transportFunction() else: # order for reproductibility self.params = sorted(self.params.items()) # if grid search if self.isGridSearch(): # compute combinaison for each param self.findBestParameters(Xs, ys=ys, Xt=Xt, yt=yt) self.transportModel = self.transportFunction(**self.bestParam) else: # simply train with basic param self.transportModel = self.transportFunction(**self.params_) self.t
ransportModel.fit(Xs, ys=ys, Xt=Xt, yt=yt) if self.feedback: pushFeedback(20, feedback=self.feedback) return self.transportModel def predictTransfer(self, imageSource, outRaster, mask=None, NODATA=-9999, feedback=None
, norm=False): """ Predict model using domain adaptation. Parameters ---------- model : object Model generated from learnTransfer function. imageSource : str Path of image to adapt (source image) outRaster : str Path of tiff image to save as. mask: str, optional Path of raster mask. NODATA : int, optional Default -9999 feedback : object, optional For Qgis Processing. Default is None. Returns ------- outRaster : str Return the path of the predicted image. """ if self.feedback: pushFeedback('Now transporting ' + str(os.path.basename(imageSource))) dataSrc = gdal.Open(imageSource) # Get the size of the image d = dataSrc.RasterCount nc = dataSrc.RasterXSize nl = dataSrc.RasterYSize # Get the geoinformation GeoTransform = dataSrc.GetGeoTransform() Projection = dataSrc.GetProjection() # Get block size band = dataSrc.GetRasterBand(1) block_sizes = band.GetBlockSize() x_block_size = block_sizes[0] y_block_size = block_sizes[1] #gdal_dt = band.DataType # Initialize the output driver = gdal.GetDriverByName('GTiff') dst_ds = driver.Create(outRaster, nc, nl, d, 3) dst_ds.SetGeoTransform(GeoTransform) dst_ds.SetProjection(Projection) del band # Perform the classification if mask is not None: maskData = gdal.Open(mask, gdal.GA_ReadOnly) total = nl * y_block_size total = 80 / (int(nl / y_block_size)) for i in range(0, nl, y_block_size): # feedback for Qgis if self.feedback: pushFeedback(int(i * total) + 20, feedback=self.feedback) try: if self.feedback.isCanceled(): break except BaseException: pass if i + y_block_size < nl: # Check for size consistency in Y lines = y_block_size else: lines = nl - i for j in range( 0, nc, x_block_size): # Check for size consistency in X if j + x_block_size < nc: cols = x_block_size else: cols = nc - j # Load the data and Do the prediction X = np.empty((cols * lines, d)) for ind in range(d): X[:, ind] = dataSrc.GetRasterBand( int(ind + 1)).ReadAsArray(j, i, cols, lines).reshape(cols * lines) # Do the prediction if mask is None: mask_temp = dataSrc.GetRasterBand(1).ReadAsArray( j, i, cols, lines).reshape(cols * lines) else: mask_temp = maskData.GetRasterBand(1).ReadAsArray( j, i, cols, lines).reshape(cols * lines) # check if nodata t = np.where((mask_temp != 0) & (X[:, 0] != NODATA))[0] # transform array, default has nodata value yp = np.empty((cols * lines, d)) yp[:, :] = NODATA # yp = np.nan((cols*lines,d)) # K = np.zeros((cols*lines,)) # TODO: Change this part accorindgly ... 
# if t.size > 0: if t.size > 0: tempOT = X[t, :] yp[t, :] = self.transportModel.transform(tempOT) for ind in range(d): out = dst_ds.GetRasterBand(ind + 1) # Write the data ypTemp = yp[:, ind] out.WriteArray(ypTemp.reshape(lines, cols), j, i) out.SetNoDataValue(NODATA) out.FlushCache() del X, yp return outRaster def isGridSearch(self): # search for gridSearch paramGrid = [] for key in self.params_.keys(): if isinstance(self.params_.get(key), (list, np.ndarray)): paramGrid.append(key) if paramGrid == []: self.paramGrid = False else: self.paramGrid = paramGrid self.params = self.params_.copy() if self.paramGrid: return True else: return False def generateParamForGridSearch(self): hyperParam = {key: self.params_[key] for key in self.paramGrid} items = sorted(hyperParam.items()) keys, values = zip(*items) for v in product(*values): paramsToAdd = dict(zip(keys, v)) self.params.update(paramsToAdd) yield self.params def findBestParameters(self, Xs, ys, Xt, yt): self.bestScore = None for gridOT in self.generateParamForGridSearch(): self.transportModel = self.transportFunction(**gridOT) self.transportModel.fit(Xs, ys, Xt, yt) #XsTransformed = self.transportModel.transform(Xs) #XsPredict = self.inverseTransform(XsTransformed) from ot.da import BaseTransport transp_Xt = BaseTransport.inverse_transform( self.transportModel, Xs=Xs, ys=ys, Xt=Xt, yt=yt) if self.feedback: pushFeedback( 'Testing params : ' + str(gridOT), feedback=self.feedback) """ #score = mean_squared_error(Xs,XsPredict) from sklearn.svm import SVC from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV param_grid = dict(gamma=2.0**np.arange(-4,1), C=10.0**np.arange(-2,3)) classifier = SVC(probability=False) cv = StratifiedKFold(n_splits=5) grid = GridSearchCV(classifier,param_grid=param_grid, cv=cv,n_jobs=1) # need to rescale for hyperparameter of svm if self.scaler is False: from sklearn.preprocessing import MinMaxScaler scaler = MinMaxScaler(feature_range=(-1,1)) scaler.fit(Xs,ys) Xs = scaler.transform(Xs) XsPredict = scaler.transform(XsPredict) #XsPredict = scaler.transform(XsPredict) grid.fit(Xs,ys) model = grid.best_estimator_ model.fit(Xs,ys) yp = model.predict(XsPredict) currentScore = dict(OA=accuracy_score(yp,ys),Kappa=cohen_kappa_score(yp,ys),F1=f1_score(yp,ys,average='micro')) if self.feedback: pushFeedback('Kappa is : '+str(currentScore.get('Kappa'))) if self.bestScore is None or self.bestScore.get('Kappa') < currentScore.get('Kappa'): self.bestScore = currentScore.copy() self.bestParam = gridOT.copy() """ currentScore = mean_squared_error(Xs, transp_Xt) if self.feedback: pushFeedback( 'RMSE is : ' +
from datetime import timedelta from django.conf import settings from django.utils.timezone import now from rest_framework import status, pagination from rest_framework.generics import CreateAPIView, DestroyAPIView, ListAPIView from rest_framework.response import Response from churchill.api.v1.shots.serializers import ( ShotSerializer, ShotItemSerializer, ShotDateSerializer, ) from churchill.apps.shots.models import Shot, ShotItem from churchill.apps.shots.services import ( create_shot, delete_shot, delete_shot_item, create_shot_item, get_shots_calendar, ) class ShotsView(CreateAPIView, DestroyAPIView, ListAPIView): serializer_class = ShotSerializer def get_queryset(self): return Shot.objects.for_user(self.request.user) def create(self, request, *args, **kwargs): serializer = self.get_serializer(data=request.data) serializer.is_valid(raise_exception=True) shot = create_shot(request.user, **serializer.validated_data) serializer = self.get_serializer(shot) return Response(serializer.data, status=status.HTTP_201_CREATED) def destroy(self, request, *args, **kwargs): delete_shot(request.user, request.data["id"]) return Response() class ShotsItemPagination(pagination.PageNumberPagination): page_size = 100 class ShotsItemView(CreateAPIView, DestroyAPIView, ListAPIView): serializer_class = ShotItemSerializer pagination_class = ShotsItemPagination def get_queryset(self): default_offset = now() - timedelta(weeks=4) return ShotItem.objects.filter( user=self.request.user, created_at__gte=default_offset ).order_by("-created_at") def create(self, request, *args, **kwargs): try: shot = Shot.objects.for_user(self.request.user).get(id=request.data["id"]) except (KeyError, Shot.DoesNotExist): return Response(status=status.HTTP_400_BAD_REQUEST) shot_item = create_shot_item(request.user, shot) serializer = self.get_serializer(shot_item) return Response(serializer.data, status=status.HTTP_201_CREATED) def destroy(self, request, *args, **kwargs): delete_shot_item(request.user, request.data["id"]) return Response() class CalendarPagination(pagination.PageNumberPaginat
ion): page_size = settin
gs.CALENDAR_WEEK_SIZE * 7 class ShotsItemCalendarView(ListAPIView): serializer_class = ShotDateSerializer pagination_class = CalendarPagination def get_queryset(self): weeks_offset = int(self.request.query_params.get("weeks_offset", 0)) return get_shots_calendar(self.request.user, weeks_offset)
"""Thetests for the Modbus sensor component.""" import pytest from homeassistant.components.binary_sensor import DOMAIN as SENSOR_DOMAIN from homeassistant.components.modbus.const import ( CALL_TYPE_COIL, CALL_TYPE_DISCRETE, CONF_INPUT_TYPE, CONF_LAZY_ERROR, CONF_SLAVE_COUNT, ) from homeassistant.const import ( CONF_ADDRESS, CONF_BINARY_SENSORS, CONF_DEVICE_CLASS, CONF_NAME, CONF_SCAN_INTERVAL, CONF_SLAVE, STATE_OFF, STATE_ON, STATE_UNAVAILABLE, STATE_UNKNOWN, ) from homeassistant.core import State from .conftest import TEST_ENTITY_NAME, ReadResult, do_next_cycle ENTITY_ID = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}".replace(" ", "_") @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, } ] }, { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_SLAVE: 10, CONF_INPUT_TYPE: CALL_TYPE_DISCRETE, CONF_DEVICE_CLASS: "door", CONF_LAZY_ERROR: 10, } ] }, ], ) async def test_config_binary_sensor(hass, mock_modbus): """Run config test for binary sensor.""" assert SENSOR_DOMAIN in hass.config.components @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_INPUT_TYPE: CALL_TYPE_COIL, }, ], }, { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_INPUT_TYPE: CALL_TYPE_DISCRETE, }, ], }, ], ) @pytest.mark.parametrize( "register_words,do_exception,expected", [ ( [0xFF], False, STATE_ON, ), ( [0x01], False, STATE_ON, ), ( [0x00], False, STATE_OFF, ), ( [0x80], False, STATE_OFF, ), ( [0xFE], False, STATE_OFF, ), ( [0x00], True, STATE_UNAVAILABLE, ), ], ) async def test_all_binary_sensor(hass, expected, mock_do_cycle): """Run test for given config.""" assert hass.states.get(ENTITY_ID).state == expected @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS:
51, C
ONF_INPUT_TYPE: CALL_TYPE_COIL, CONF_SCAN_INTERVAL: 10, CONF_LAZY_ERROR: 2, }, ], }, ], ) @pytest.mark.parametrize( "register_words,do_exception,start_expect,end_expect", [ ( [0x00], True, STATE_UNKNOWN, STATE_UNAVAILABLE, ), ], ) async def test_lazy_error_binary_sensor(hass, start_expect, end_expect, mock_do_cycle): """Run test for given config.""" now = mock_do_cycle assert hass.states.get(ENTITY_ID).state == start_expect now = await do_next_cycle(hass, now, 11) assert hass.states.get(ENTITY_ID).state == start_expect now = await do_next_cycle(hass, now, 11) assert hass.states.get(ENTITY_ID).state == end_expect @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 1234, CONF_INPUT_TYPE: CALL_TYPE_COIL, } ] }, ], ) async def test_service_binary_sensor_update(hass, mock_modbus, mock_ha): """Run test for service homeassistant.update_entity.""" await hass.services.async_call( "homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True ) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_OFF mock_modbus.read_coils.return_value = ReadResult([0x01]) await hass.services.async_call( "homeassistant", "update_entity", {"entity_id": ENTITY_ID}, blocking=True ) await hass.async_block_till_done() assert hass.states.get(ENTITY_ID).state == STATE_ON ENTITY_ID2 = f"{ENTITY_ID}_1" @pytest.mark.parametrize( "mock_test_state", [ ( State(ENTITY_ID, STATE_ON), State(ENTITY_ID2, STATE_OFF), ) ], indirect=True, ) @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_SCAN_INTERVAL: 0, CONF_SLAVE_COUNT: 1, } ] }, ], ) async def test_restore_state_binary_sensor(hass, mock_test_state, mock_modbus): """Run test for binary sensor restore state.""" assert hass.states.get(ENTITY_ID).state == mock_test_state[0].state assert hass.states.get(ENTITY_ID2).state == mock_test_state[1].state TEST_NAME = "test_sensor" @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_SLAVE_COUNT: 3, } ] }, ], ) async def test_config_slave_binary_sensor(hass, mock_modbus): """Run config test for binary sensor.""" assert SENSOR_DOMAIN in hass.config.components for addon in ["", " 1", " 2", " 3"]: entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}{addon}".replace(" ", "_") assert hass.states.get(entity_id) is not None @pytest.mark.parametrize( "do_config", [ { CONF_BINARY_SENSORS: [ { CONF_NAME: TEST_ENTITY_NAME, CONF_ADDRESS: 51, CONF_SLAVE_COUNT: 8, } ] }, ], ) @pytest.mark.parametrize( "register_words,expected, slaves", [ ( [0x01, 0x00], STATE_ON, [ STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, ], ), ( [0x02, 0x00], STATE_OFF, [ STATE_ON, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, ], ), ( [0x01, 0x01], STATE_ON, [ STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_OFF, STATE_ON, ], ), ], ) async def test_slave_binary_sensor(hass, expected, slaves, mock_do_cycle): """Run test for given config.""" assert hass.states.get(ENTITY_ID).state == expected for i in range(8): entity_id = f"{SENSOR_DOMAIN}.{TEST_ENTITY_NAME}_{i+1}".replace(" ", "_") assert hass.states.get(entity_id).state == slaves[i]
ate. # If we ask for the window state here it will never realize that # we have been maximized because the window state change is processed # after the resize event. Using a timer event causes it to happen # after all the events have been processsed. size = event.size() QtCore.QTimer.singleShot(1, lambda: self._store_unmaximized_size(size)) def _store_unmaximized_size(self, size): state = self.windowState() maximized = bool(state & Qt.WindowMaximized) if not maximized: width, height = size.width(), size.height() if width > 0 and height > 0: self._unmaximized_size = (width, height) def restore_state(self, settings=None): if settings is None: settings = Settings() settings.load() state = settings.get_gui_state(self) return bool(state) and self.apply_state(state) def apply_state(self, state): """Imports data for view save/restore""" result = True try: self.resize(state['width'], state['height']) except: result = False try: self.move(state['x'],
state['y']) except: result = False try: if state['maximized']: self.showMaximized() try: self._unmaximized_size = (state['width'], state['height'])
except: pass except: result = False self._apply_state_applied = result return result def export_state(self): """Exports data for view save/restore""" state = self.windowState() maximized = bool(state & Qt.WindowMaximized) # when maximized we don't want to overwrite saved width/height with # desktop dimensions. if maximized and self._unmaximized_size: width, height = self._unmaximized_size else: width, height = self.width(), self.height() return { 'x': self.x(), 'y': self.y(), 'width': width, 'height': height, 'maximized': maximized, } def save_settings(self): settings = Settings() settings.load() settings.add_recent(core.getcwd()) return self.save_state(settings=settings) def closeEvent(self, event): self.save_settings() self.Base.closeEvent(self, event) def init_state(self, settings, callback, *args, **kwargs): """Restore saved settings or set the initial location""" if not self.restore_state(settings=settings): callback(*args, **kwargs) self.center() class MainWindowMixin(WidgetMixin): def __init__(self): WidgetMixin.__init__(self) # Dockwidget options self.dockwidgets = [] self.lock_layout = False self.widget_version = 0 qtcompat.set_common_dock_options(self) def export_state(self): """Exports data for save/restore""" state = WidgetMixin.export_state(self) windowstate = self.saveState(self.widget_version) state['lock_layout'] = self.lock_layout state['windowstate'] = windowstate.toBase64().data().decode('ascii') return state def apply_state(self, state): result = WidgetMixin.apply_state(self, state) windowstate = state.get('windowstate', None) if windowstate is None: result = False else: from_base64 = QtCore.QByteArray.fromBase64 result = self.restoreState( from_base64(core.encode(windowstate)), self.widget_version) and result self.lock_layout = state.get('lock_layout', self.lock_layout) self.update_dockwidget_lock_state() self.update_dockwidget_tooltips() return result def set_lock_layout(self, lock_layout): self.lock_layout = lock_layout self.update_dockwidget_lock_state() def update_dockwidget_lock_state(self): if self.lock_layout: features = (QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable) else: features = (QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable | QDockWidget.DockWidgetMovable) for widget in self.dockwidgets: widget.titleBarWidget().update_tooltips() widget.setFeatures(features) def update_dockwidget_tooltips(self): for widget in self.dockwidgets: widget.titleBarWidget().update_tooltips() class TreeMixin(object): def __init__(self, widget, Base): self.widget = widget self.Base = Base widget.setAlternatingRowColors(True) widget.setUniformRowHeights(True) widget.setAllColumnsShowFocus(True) widget.setAnimated(True) widget.setRootIsDecorated(False) def keyPressEvent(self, event): """ Make LeftArrow to work on non-directories. When LeftArrow is pressed on a file entry or an unexpanded directory, then move the current index to the parent directory. This simplifies navigation using the keyboard. For power-users, we support Vim keybindings ;-P """ # Check whether the item is expanded before calling the base class # keyPressEvent otherwise we end up collapsing and changing the # current index in one shot, which we don't want to do. widget = self.widget index = widget.currentIndex() was_expanded = widget.isExpanded(index) was_collapsed = not was_expanded # Vim keybindings... 
# Rewrite the event before marshalling to QTreeView.event() key = event.key() # Remap 'H' to 'Left' if key == Qt.Key_H: event = QtGui.QKeyEvent(event.type(), Qt.Key_Left, event.modifiers()) # Remap 'J' to 'Down' elif key == Qt.Key_J: event = QtGui.QKeyEvent(event.type(), Qt.Key_Down, event.modifiers()) # Remap 'K' to 'Up' elif key == Qt.Key_K: event = QtGui.QKeyEvent(event.type(), Qt.Key_Up, event.modifiers()) # Remap 'L' to 'Right' elif key == Qt.Key_L: event = QtGui.QKeyEvent(event.type(), Qt.Key_Right, event.modifiers()) # Re-read the event key to take the remappings into account key = event.key() if key == Qt.Key_Up: idxs = widget.selectedIndexes() rows = [idx.row() for idx in idxs] if len(rows) == 1 and rows[0] == 0: # The cursor is at the beginning of the line. # If we have selection then simply reset the cursor. # Otherwise, emit a signal so that the parent can # change focus. widget.up.emit() elif key == Qt.Key_Space: widget.space.emit() result = self.Base.keyPressEvent(widget, event) # Let others hook in here before we change the indexes widget.index_about_to_change.emit() # Automatically select the first entry when expanding a directory if (key == Qt.Key_Right and was_collapsed and widget.isExpanded(index)): index = widget.moveCursor(widget.MoveDown, event.modifiers()) widget.setCurrentIndex(index) # Process non-root entries with valid parents only. elif key == Qt.Key_Left and index.parent().isValid(): # File entries have rowCount() == 0 if widget.model().itemFromIndex(index).rowCount() == 0: widget.setCurrentIndex(index.parent()) # Otherwise, do this for collapsed directories only elif was_col
"""Support for Z-Wave fans.""" import math from homeassistant.components.fan import ( DOMAIN as FAN_DOMAIN, SUPPORT_SET_SPEED, FanEntity, ) from homeassistant.core import callback from homeassistant.helpers.dispatcher import async_dispatcher_connect from homeassistant.util.percentage import ( percentage_to_ranged_value, ranged_value_to_percentage, ) from .const import DATA_UNSUBSCRIBE, DOMAIN from .entity import ZWaveDeviceEntity SUPPORTED_FEATURES = SUPPORT_SET_SPEED SPEED_RANGE = (1, 99) # off is not included async def async_setup_entry(hass, config_entry, async_add_entities): """Set up Z-Wave Fan from Config Entry.""" @callback def async_add_fan(values): """Add Z-Wave Fan.""" fan = ZwaveFan(values) async_add_entities([fan]) hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append( async_dispatcher_connect(hass, f"{DOMAIN}_new_{FAN_DOMAIN}", async_add_fan) ) class ZwaveFan(ZWaveDeviceEntity, FanEntity): """Representation of a Z-Wave fan.""" async def async_set_percentage(self, percentage): """Set the speed percentage of the fan.""" if percentage is None: # Value 255 tells device to return to previous value zwave_speed = 255 elif percentage == 0: zwave_speed = 0 else: zwave_speed = math.ceil(percentage_to_ranged_value(SPEED_RANGE, percentage)) self.values.primary.send_value(zwave_speed) async def async_turn_on( self, speed=None, percentage=None, preset_mode=None, **kwargs ): """Turn the d
evice on.""" await self.async_set_percentage(percentage)
async def async_turn_off(self, **kwargs): """Turn the device off.""" self.values.primary.send_value(0) @property def is_on(self): """Return true if device is on (speed above 0).""" return self.values.primary.value > 0 @property def percentage(self): """Return the current speed. The Z-Wave speed value is a byte 0-255. 255 means previous value. The normal range of the speed is 0-99. 0 means off. """ return ranged_value_to_percentage(SPEED_RANGE, self.values.primary.value) @property def supported_features(self): """Flag supported features.""" return SUPPORTED_FEATURES
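# Quick standalone illustration (not part of the integration itself): how SPEED_RANGE
# maps between Home Assistant percentages and the 0-99 Z-Wave multilevel value used above.
import math

from homeassistant.util.percentage import (
    percentage_to_ranged_value,
    ranged_value_to_percentage,
)

SPEED_RANGE = (1, 99)  # off (0) is not part of the range

print(math.ceil(percentage_to_ranged_value(SPEED_RANGE, 50)))  # 50% -> speed 50
print(ranged_value_to_percentage(SPEED_RANGE, 99))             # speed 99 -> 100%
print(ranged_value_to_percentage(SPEED_RANGE, 1))              # speed 1  -> 1%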
s_global', is_flag=True, help="Show global metadata instead of local") @click.option('--filter', metavar='<filter>', help='Filter for name, description, blob, global,' ' networkNotificationDisabled, ID, externalID') @click.pass_context def metadata_list(ctx, filter, entity, id, is_global): """List all metadata associated to any entity""" if is_global: request = "%ss/%s/globalmetadatas" % (entity, id) else: request = "%ss/%s/metadatas" % (entity, id) result = ctx.obj['nc'].get(request, filter=filter) table = PrettyTable(["ID", "name", "description"]) for line in result: table.add_row([line['ID'], line['name'], line['description']]) print(table) @vsdcli.command(name='metadata-show') @click.argument('metadata-id', metavar='<Metadata ID>', required=True) @click.option('--data', 'data', is_flag=True, help="Show data content only. Preemptive option on list-tag") @click.option('--global', 'is_global', is_flag=True, help="Show global metadata instead of local") @click.option('--list-tag', is_flag=True, help="List tag for this metadata") @click.pass_context def metadata_show(ctx, metadata_id, data, is_global, list_tag): """Show information for a given metadata id""" if is_global: request = "globalmetadatas/%s" % metadata_id else: request = "metadatas/%s" % metadata_id result = ctx.obj['nc'].get(request)[0] if data: print(result['blob']) return if not list_tag: print_object(result, only=ctx.obj['show_only'], exclude=['blob']) return tags = [] for tag in result['metadataTagIDs']: tags.append(ctx.obj['nc'].get("metadatatags/%s" % tag)[0]) table = PrettyTable(["ID", "name", "description"]) for line in tags: table.add_row([line['ID'], line['name'], line['description']]) print(table) @vsdcli.command(name='metadata-create') @click.argument('name', metavar='<name>', required=True) @click.option('--entity', metavar='<name>', required=True, help="Can be any entity in VSD") @click.option('--id', metavar='<ID>', required=True, help="ID of the entity") @click.option('--tag', metavar='<ID>', multiple=True, help="tag's ID to add. Can be repeted") @click.option('--data', required=True, help="Metadata that describes about the entity attached to it.") @click.pass_context def metadata_create(ctx, name, entity, id, tag, data): """Create a metadata for a given entity ID""" params = {'name': name, 'blob': data} if tag: params['metadataTagIDs'] = [] for t in tag: params['metadataTagIDs'].append(t) request = "%ss/%s/metadatas" % (entity, id) result = ctx.obj['nc'].post(request, params)[0] print_object(result, only=ctx.obj['show_only'], exclude=['blob']) @vsdcli.command(name='metadata-update') @click.argument('metadata-id', metavar='<metadata ID>', required=True) @click.option('--key-value', metavar='<key:value>', multiple=True) @click.option('--global', 'is_global', is_flag=True, help="Update global metadata instead of local") @click.pass_context def metadata_update(ctx, metadata_id, key_value, is_global): """Update key/value for a given metadata""" params = {} for kv in key_value: key, value = kv.split(':', 1) params[key] = value if is_global: request = "globalmetadatas/%s" % metadata_id else: request = "metadatas/%s" % metadata_id ctx.obj['nc'].put(request, params) result = ctx.obj['nc'].get(request)[0] print_object(result, only=ctx.obj['show_only'], exclude=['blob']) @vsdcli.command(name='metadata-add-tag') @click.argument('metadata-id', metavar='<metadata ID>', required=True) @click.option('--tag', metavar='<ID>', multiple=True, required=True, help="tag's ID to add. 
Can be repeted") @click.option('--global', 'is_global', is_flag=True, help="Update global metadata instead of local") @click.pass_context def metadata_add_tag(ctx, metadata_id, is_global, tag): """Add single or multiple tag to an existing metadata""" if is_global: request = "globalmetadatas/%s" % metadata_id else: request = "metadatas/%s" % metadata_id params = {} params['metadataTagIDs'] = ctx.obj['nc'].get(request)[0]['metadataTagIDs'] for t in tag: params['metadataTagIDs'].append(t) ctx.obj['nc'].put(request, params) result = ctx.obj['nc'].get(request)[0] print_object(result, only=ctx.obj['show_only'], exclude=['blob']) @vsdcli.command(name='metadata-remove-tag') @click.argument('metadata-id', metavar='<metadata ID>', required=True) @click.option('--tag', metavar='<ID>', multiple=True, required=True, help="tag's ID to remove. Can be repeted") @click.option('--global', 'is_global', is_flag=True, help="Update global metadata instead of local") @click.pass_context def metadata_remove_tag(ctx, metadata_id, is_global, tag): """remove single or multiple tag to an existing metadata""" if is_global: request = "globalmetadatas/%s" % metadata_id else: request = "metadatas/%s" % metadata_id existing_tag = ctx.obj['nc'].get(request)[0]['metadataTagIDs'] if not len(existing_tag): print("Error: There is no tag for metadata
%s" % metadata_id) exit(1) params = {'metadataTagIDs': []} change = False for t in existing_tag: if t not in tag: para
ms['metadataTagIDs'].append(t) else: change = True if not change: print("Warning: none of given tag exists in metadata %s" % metadata_id) exit(1) ctx.obj['nc'].put(request, params) result = ctx.obj['nc'].get(request)[0] print_object(result, only=ctx.obj['show_only'], exclude=['blob']) @vsdcli.command(name='metadata-delete') @click.argument('metadata-id', metavar='<metadata ID>', required=True) @click.pass_context def metadata_delete(ctx, metadata_id): """Delete a given metadata""" ctx.obj['nc'].delete("metadatas/%s" % metadata_id) @vsdcli.command(name='metadatatag-list') @click.option('--enterprise-id', metavar='<ID>') @click.option('--metadata-id', metavar='<ID>') @click.option('--filter', metavar='<filter>', help="Filter for name, description, associatedExternalServiceID" ", autoCreated, ID, externalID") @click.pass_context def metadatatag_list(ctx, enterprise_id, metadata_id, filter): """Show all metadata tags for a given enterprise or metadata. If nor enterprise or metadata is given, list all metadata tags associated to DC""" if enterprise_id: request = "enterprises/%s/metadatatags" % enterprise_id elif metadata_id: request = "metadatas/%s/metadatatags" % metadata_id else: request = "metadatatags" result = ctx.obj['nc'].get(request, filter=filter) table = PrettyTable(["ID", "name", "description"]) for line in result: table.add_row([line['ID'], line['name'], line['description']]) print(table) @vsdcli.command(name='metadatatag-show') @click.argument('metadatatag-id', metavar='<ID>', required=True) @click.pass_context def metadatatag_show(ctx, metadatatag_id): """Show information for a given metadata tag id""" result = ctx.obj['nc'].get("metadatatags/%s" % metadatatag_id)[0] print_object(result, only=ctx.obj['show_only']) @vsdcli.command(name='metadatatag-create') @click.argument('name', metavar='<name>', required=True) @click.option('--enterprise-id', metavar='<ID>') @click.option('--description') @click.pass_context def metadatatag_create(ctx, name, enterprise_id, description): """Add an metadatatag to a given enterprise. CSPROOT can create DC associated tag if enterprise id is not specified""" if enterprise_id:
#!/usr/bin/env python # -*- coding: utf-8 -*- # # ***** BEGIN LICENSE BLOCK ***** # Copyright (C) 2012-2014, Hayaki Saito # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # TH
E SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. # # ***** END LICENSE BLOCK ***** from termprop import Termprop, MockTermprop if __name__ == "__main__": Termprop().test()
, 'airflow.providers.google.cloud.sensors.gcs.GCSObjectsWtihPrefixExistenceSensor', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHadoopJobOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocScaleClusterOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkJobOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitSparkSqlJobOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitHiveJobOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPigJobOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocSubmitPySparkJobOperator', 'airflow.providers.google.cloud.operators.mlengine.MLEngineManageModelOperator', 'airflow.providers.google.cloud.operators.mlengine.MLEngineManageVersionOperator', 'airflow.providers.google.cloud.operators.dataflow.DataflowCreateJavaJobOperator', 'airflow.providers.google.cloud.operators.bigquery.BigQueryPatchDatasetOperator', 'airflow.providers.google.cloud.operators.dataflow.DataflowCreatePythonJobOperator', 'airflow.providers.google.cloud.operators.bigquery.BigQueryExecuteQueryOperator', } # Those operators should not have examples as they are never used standalone (they are abstract) BASE_OPERATORS = { 'airflow.providers.google.cloud.operators.compute.ComputeEngineBaseOperator', 'airflow.providers.google.cloud.operators.cloud_sql.CloudSQLBaseOperator', 'airflow.providers.google.cloud.operators.dataproc.DataprocJobBaseOperator', } # Please at the examples to those operators at the earliest convenience :) MISSING_EXAMPLES_FOR_OPERATORS = { 'airflow.providers.google.cloud.operators.dataproc.DataprocInstantiateInlineWorkflowTemplateOperator', 'airflow.providers.google.cloud.operators.mlengine.MLEngineTrainingCancelJobOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPGetStoredInfoTypeOperator', 'airflow.providers.google.cloud.ope
rators.dlp.CloudDLPReidentifyContentOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDeidentifyTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPCreateDLPJobOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPU
pdateDeidentifyTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobTriggerOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListDeidentifyTemplatesOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDeidentifyTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListInspectTemplatesOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListStoredInfoTypesOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPUpdateInspectTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDLPJobOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListJobTriggersOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPCancelDLPJobOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPGetDLPJobOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPGetInspectTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListInfoTypesOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPDeleteDeidentifyTemplateOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPListDLPJobsOperator', 'airflow.providers.google.cloud.operators.dlp.CloudDLPRedactImageOperator', 'airflow.providers.google.cloud.operators.datastore.CloudDatastoreDeleteOperationOperator', 'airflow.providers.google.cloud.operators.datastore.CloudDatastoreGetOperationOperator', 'airflow.providers.google.cloud.sensors.gcs.GCSObjectUpdateSensor', 'airflow.providers.google.cloud.sensors.gcs.GCSUploadSessionCompleteSensor', } def test_example_dags(self): operators_modules = itertools.chain( *(self.find_resource_files(resource_type=d) for d in ["operators", "sensors", "transfers"]) ) example_dags_files = self.find_resource_files(resource_type="example_dags") # Generate tuple of department and service e.g. ('marketing_platform', 'display_video') operator_sets = [(f.split("/")[-3], f.split("/")[-1].rsplit(".")[0]) for f in operators_modules] example_sets = [ (f.split("/")[-3], f.split("/")[-1].rsplit(".")[0].replace("example_", "", 1)) for f in example_dags_files ] def has_example_dag(operator_set): for e in example_sets: if e[0] != operator_set[0]: continue if e[1].startswith(operator_set[1]): return True return False with self.subTest("Detect missing example dags"): missing_example = {s for s in operator_sets if not has_example_dag(s)} missing_example -= self.MISSING_EXAMPLE_DAGS assert set() == missing_example with self.subTest("Keep update missing example dags list"): new_example_dag = set(example_sets).intersection(set(self.MISSING_EXAMPLE_DAGS)) if new_example_dag: new_example_dag_text = '\n'.join(str(f) for f in new_example_dag) self.fail( "You've added a example dag currently listed as missing:\n" f"{new_example_dag_text}" "\n" "Thank you very much.\n" "Can you remove it from the list of missing example, please?" ) with self.subTest("Remove extra elements"): extra_example_dags = set(self.MISSING_EXAMPLE_DAGS) - set(operator_sets) if extra_example_dags: new_example_dag_text = '\n'.join(str(f) for f in extra_example_dags) self.fail( "You've added a example dag currently listed as missing:\n" f"{new_example_dag_text}" "\n" "Thank you very much.\n" "Can you remove it from the list of missing example, please?" 
) def test_missing_example_for_operator(self): missing_operators = [] for resource_type in ["operators", "sensors", "transfers"]: operator_files = set( self.find_resource_files(top_level_directory="airflow", resource_type=resource_type) ) for filepath in operator_files: service_name = os.path.basename(filepath)[: -(len(".py"))] example_dags = list( glob.glob( f"{ROOT_FOLDER}/airflow/providers/google/*/example_dags/example_{service_name}*.py" ) ) if not example_dags: # Ignore. We have separate tests that detect this. continue example_paths = { path for example_dag in example_dags for path in get_imports_from_file(example_dag) } example_paths = { path for path in example_paths if f'.{resource_type}.{service_name}.' in path } print("example_paths=", example_paths) operators_paths = set(get_classes_from_file(f"{ROOT_FOLDER}/{filepath}")) missing_operators.extend(operators_paths - example_paths) full_set = set() full_set.update(self.MISSING_EXAMPLES_FOR_OPERATORS) full_set.update(self.DEPRECATED_OPERATORS) full_set.update(self.BASE_OPERATORS) assert set(missing_operators) == full_set @parameterized.expand( itertools.product(["_system.py", "_system_helper.py"], ["operators", "sensors", "transfers"]) ) def test_detect_invali
# -*- coding: utf-8 -
*- # Generated by Django 1.11
.7 on 2018-01-04 10:49 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('polls', '0011_remove_vote_endorse'), ] operations = [ migrations.RenameField( model_name='simplevote', old_name='endorse_new', new_name='endorse', ), ]
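# For orientation (not emitted by Django itself, and assuming the default table name
# polls_simplevote): on most backends this RenameField reduces to a single column
# rename, roughly
#     ALTER TABLE polls_simplevote RENAME COLUMN endorse_new TO endorse;
# which can be checked with `python manage.py sqlmigrate polls <this migration>`.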
# http://www.k12reader.com/dolch-word-list-sorted-alphabetically-by-grade-with-nouns/ f = open("data1.txt") header = f.readline() from collections import OrderedDict database = OrderedDict() for item in header.split(): database[item] = [] for line in f.readlines(): items = line.rstrip().split('\t') for index, item in enumerate(items): if not item: continue # Since there are
two columns for nouns
# And we collapsed into one if index > 5: index = 5 category = database.keys()[index] database[category].append(item)
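# A minimal, self-contained illustration (made-up header and row, not the real
# data1.txt) of the column-collapsing rule above: any column index past 5 is folded
# into the sixth category, so both noun columns land in one list. Note that
# database.keys()[index] only works on Python 2; Python 3 needs list(database.keys()).
from collections import OrderedDict

demo_header = "PrePrimer Primer First Second Third Nouns"
demo_rows = ["a\tall\tafter\talways\tabout\tapple\tbaby"]

demo_db = OrderedDict((name, []) for name in demo_header.split())
for demo_line in demo_rows:
    for idx, word in enumerate(demo_line.rstrip().split('\t')):
        if not word:
            continue
        if idx > 5:  # the two noun columns collapse into one
            idx = 5
        demo_db[list(demo_db.keys())[idx]].append(word)

print(demo_db['Nouns'])  # ['apple', 'baby']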
# -*- coding: utf-8 -*- import numpy as np import pytest import pyls @pytest.fixt
ure(scope='session') def testdir(tmpdir_factory): data_dir = tmpdir_factory.mktemp('data') return str(data_dir) @pytest.fixture(scope='session') def mpls_results(): Xf = 1000 subj = 100 rs = np.random.RandomState(1234) return pyls.meancentered_pls(rs.rand(subj, Xf), n_cond=2,
n_perm=10, n_boot=10, n_split=10) @pytest.fixture(scope='session') def bpls_results(): Xf = 1000 Yf = 100 subj = 100 rs = np.random.RandomState(1234) return pyls.behavioral_pls(rs.rand(subj, Xf), rs.rand(subj, Yf), n_perm=10, n_boot=10, n_split=10) @pytest.fixture(scope='session') def pls_inputs(): return dict(X=np.random.rand(100, 1000), Y=np.random.rand(100, 100), groups=[50, 50], n_cond=1, mean_centering=0, n_perm=10, n_boot=10, n_split=5, test_size=0.25, test_split=100, rotate=True, ci=95, seed=1234, verbose=True, permsamples=10, bootsamples=10)
from mcpi.minecraft import Minecraft
from time import sleep

mc = Minecraft.create()


class mic:
    # Last queried entity position.
    x = 0
    y = 0
    z = 0
    u = 1

    def usid(self):
        # Print the entity IDs of all connected players.
        t = mc.getPlayerEntityIds()
        print t

    def uspos(self, wkj):
        # Store and print the position of the entity with ID wkj.
        self.x, self.y, self.z = mc.entity.getPos(wkj)
        print self.x, self.y, self.z

    def wdfe(self, item):
        # Place a block of the given type at the stored position.
        mc.setBlock(self.x, self.y, self.z, item)

    def tnt(self, item):
        # Place a block of the given type with block data 1.
        mc.setBlock(self.x, self.y, self.z, item, 1)


s = mic()
s.usid()
#s.uspos(57369)
s.uspos(1)
s.wdfe(46)  # 46 is the TNT block id
#s.uspos(20514)
#!/usr/bin/env python # Copyright (c) 2012 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Make sure function-level linking setting is extracted properly. """ import TestGyp import sys if sys.platform == 'win32': test = TestGyp.TestGyp(formats=['msvs', 'ninja']) CHDIR = 'compiler-flags' test.run_gyp('function-level-linking.gyp', chdir=CHDIR) test.build('function-level-linking.gyp', test.ALL, chdir=CHDIR) def CheckForSection
String(binary, search_for, should_exist): output = test.run_dumpbin('/headers', binary) if should_exist and search_for not in output: print 'Did not find "%s" in %s' % (search_for, binary) test.fail_test() elif not should_exist and search_for in output: print 'Found "%s" in %s (and shouldn\'t have)' % (search_for, binary) test.fail_test() def Object(proj, obj): sep = '.' if test.fo
rmat == 'ninja' else '\\' return 'obj\\%s%s%s' % (proj, sep, obj) look_for = '''COMDAT; sym= "int __cdecl comdat_function''' # When function level linking is on, the functions should be listed as # separate comdat entries. CheckForSectionString( test.built_file_path(Object('test_fll_on', 'function-level-linking.obj'), chdir=CHDIR), look_for, should_exist=True) CheckForSectionString( test.built_file_path(Object('test_fll_off', 'function-level-linking.obj'), chdir=CHDIR), look_for, should_exist=False) test.pass_test()
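# For context, a hypothetical sketch of what compiler-flags/function-level-linking.gyp
# could look like (the real file is not shown here): the two targets differ only in the
# MSVC /Gy switch, which is why the COMDAT records show up for test_fll_on but not for
# test_fll_off.
EXAMPLE_GYP = {
    'targets': [
        {
            'target_name': 'test_fll_on',
            'type': 'executable',
            'sources': ['function-level-linking.cc'],
            'msvs_settings': {
                'VCCLCompilerTool': {'EnableFunctionLevelLinking': 'true'},   # /Gy
            },
        },
        {
            'target_name': 'test_fll_off',
            'type': 'executable',
            'sources': ['function-level-linking.cc'],
            'msvs_settings': {
                'VCCLCompilerTool': {'EnableFunctionLevelLinking': 'false'},  # /Gy-
            },
        },
    ],
}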
# -*- coding: utf-8 -*- # Generated by Django 1.9.12 on 2017-01-30 14:30 from __future__ import unicode_liter
als import enum from django.db import migrations import enumfields.fields class TrxType(enum.Enum): FINALIZED = 0 PENDING = 1 CANCELLATION = 2 class TrxStatus(enum.Enum): PENDING = 0 FINALIZED = 1 REJECTED = 2 CANCELED = 3
class Migration(migrations.Migration): dependencies = [ ('wallet', '0005_auto_20160309_1722'), ] operations = [ migrations.AlterField( model_name='wallettransaction', name='trx_status', field=enumfields.fields.EnumIntegerField(default=1, enum=TrxStatus), ), migrations.AlterField( model_name='wallettransaction', name='trx_type', field=enumfields.fields.EnumIntegerField(default=0, enum=TrxType), ), ]
# C
opyright 2021 DeepMind Technologies Limited. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed
under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """ResNet model family.""" import functools import haiku as hk import jax import jax.numpy as jnp from nfnets import base class ResNet(hk.Module): """ResNetv2 Models.""" variant_dict = {'ResNet50': {'depth': [3, 4, 6, 3]}, 'ResNet101': {'depth': [3, 4, 23, 3]}, 'ResNet152': {'depth': [3, 8, 36, 3]}, 'ResNet200': {'depth': [3, 24, 36, 3]}, 'ResNet288': {'depth': [24, 24, 24, 24]}, 'ResNet600': {'depth': [50, 50, 50, 50]}, } def __init__(self, width, num_classes, variant='ResNet50', which_norm='BatchNorm', norm_kwargs=None, activation='relu', drop_rate=0.0, fc_init=jnp.zeros, conv_kwargs=None, preactivation=True, use_se=False, se_ratio=0.25, name='ResNet'): super().__init__(name=name) self.width = width self.num_classes = num_classes self.variant = variant self.depth_pattern = self.variant_dict[variant]['depth'] self.activation = getattr(jax.nn, activation) self.drop_rate = drop_rate self.which_norm = getattr(hk, which_norm) if norm_kwargs is not None: self.which_norm = functools.partial(self.which_norm, **norm_kwargs) if conv_kwargs is not None: self.which_conv = functools.partial(hk.Conv2D, **conv_kwargs) else: self.which_conv = hk.Conv2D self.preactivation = preactivation # Stem self.initial_conv = self.which_conv(16 * self.width, kernel_shape=7, stride=2, padding='SAME', with_bias=False, name='initial_conv') if not self.preactivation: self.initial_bn = self.which_norm(name='initial_bn') which_block = ResBlockV2 if self.preactivation else ResBlockV1 # Body self.blocks = [] for multiplier, blocks_per_stage, stride in zip([64, 128, 256, 512], self.depth_pattern, [1, 2, 2, 2]): for block_index in range(blocks_per_stage): self.blocks += [which_block(multiplier * self.width, use_projection=block_index == 0, stride=stride if block_index == 0 else 1, activation=self.activation, which_norm=self.which_norm, which_conv=self.which_conv, use_se=use_se, se_ratio=se_ratio)] # Head self.final_bn = self.which_norm(name='final_bn') self.fc = hk.Linear(self.num_classes, w_init=fc_init, with_bias=True) def __call__(self, x, is_training, test_local_stats=False, return_metrics=False): """Return the output of the final layer without any [log-]softmax.""" outputs = {} # Stem out = self.initial_conv(x) if not self.preactivation: out = self.activation(self.initial_bn(out, is_training, test_local_stats)) out = hk.max_pool(out, window_shape=(1, 3, 3, 1), strides=(1, 2, 2, 1), padding='SAME') if return_metrics: outputs.update(base.signal_metrics(out, 0)) # Blocks for i, block in enumerate(self.blocks): out, res_var = block(out, is_training, test_local_stats) if return_metrics: outputs.update(base.signal_metrics(out, i + 1)) outputs[f'res_avg_var_{i}'] = res_var if self.preactivation: out = self.activation(self.final_bn(out, is_training, test_local_stats)) # Pool, dropout, classify pool = jnp.mean(out, axis=[1, 2]) # Return pool before dropout in case we want to regularize it separately. 
outputs['pool'] = pool # Optionally apply dropout if self.drop_rate > 0.0 and is_training: pool = hk.dropout(hk.next_rng_key(), self.drop_rate, pool) outputs['logits'] = self.fc(pool) return outputs class ResBlockV2(hk.Module): """ResNet preac block, 1x1->3x3->1x1 with strides and shortcut downsample.""" def __init__(self, out_ch, stride=1, use_projection=False, activation=jax.nn.relu, which_norm=hk.BatchNorm, which_conv=hk.Conv2D, use_se=False, se_ratio=0.25, name=None): super().__init__(name=name) self.out_ch = out_ch self.stride = stride self.use_projection = use_projection self.activation = activation self.which_norm = which_norm self.which_conv = which_conv self.use_se = use_se self.se_ratio = se_ratio self.width = self.out_ch // 4 self.bn0 = which_norm(name='bn0') self.conv0 = which_conv(self.width, kernel_shape=1, with_bias=False, padding='SAME', name='conv0') self.bn1 = which_norm(name='bn1') self.conv1 = which_conv(self.width, stride=self.stride, kernel_shape=3, with_bias=False, padding='SAME', name='conv1') self.bn2 = which_norm(name='bn2') self.conv2 = which_conv(self.out_ch, kernel_shape=1, with_bias=False, padding='SAME', name='conv2') if self.use_projection: self.conv_shortcut = which_conv(self.out_ch, stride=stride, kernel_shape=1, with_bias=False, padding='SAME', name='conv_shortcut') if self.use_se: self.se = base.SqueezeExcite(self.out_ch, self.out_ch, self.se_ratio) def __call__(self, x, is_training, test_local_stats): bn_args = (is_training, test_local_stats) out = self.activation(self.bn0(x, *bn_args)) if self.use_projection: shortcut = self.conv_shortcut(out) else: shortcut = x out = self.conv0(out) out = self.conv1(self.activation(self.bn1(out, *bn_args))) out = self.conv2(self.activation(self.bn2(out, *bn_args))) if self.use_se: out = self.se(out) * out # Get average residual standard deviation for reporting metrics. res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) return out + shortcut, res_avg_var class ResBlockV1(ResBlockV2): """Post-Ac Residual Block.""" def __call__(self, x, is_training, test_local_stats): bn_args = (is_training, test_local_stats) if self.use_projection: shortcut = self.conv_shortcut(x) shortcut = self.which_norm(name='shortcut_bn')(shortcut, *bn_args) else: shortcut = x out = self.activation(self.bn0(self.conv0(x), *bn_args)) out = self.activation(self.bn1(self.conv1(out), *bn_args)) out = self.bn2(self.conv2(out), *bn_args) if self.use_se: out = self.se(out) * out res_avg_var = jnp.mean(jnp.var(out, axis=[0, 1, 2])) return self.activation(out + shortcut), res_avg_var
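# A minimal usage sketch (an assumption, not part of the original module): the network
# has to run inside a Haiku transform, and hk.transform_with_state is used because
# BatchNorm keeps moving statistics as state. BatchNorm also needs explicit norm_kwargs
# here, since hk.BatchNorm has no defaults for create_scale/create_offset/decay_rate.
import jax
import jax.numpy as jnp
import haiku as hk


def _forward(images, is_training):
    net = ResNet(width=1, num_classes=10, variant='ResNet50',
                 norm_kwargs=dict(create_scale=True, create_offset=True,
                                  decay_rate=0.9))
    return net(images, is_training=is_training)


forward = hk.transform_with_state(_forward)
rng = jax.random.PRNGKey(42)
dummy = jnp.zeros((2, 224, 224, 3))
params, state = forward.init(rng, dummy, is_training=True)
outputs, state = forward.apply(params, state, rng, dummy, is_training=True)
print(outputs['logits'].shape)  # (2, 10)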
# TODO inspect for Cython (see sagenb.misc.sageinspect) from __future__ import print_function from nose.plugins.skip import SkipTest from nose.tools import assert_true from os import path as op import sys import inspect import warnings import imp from pkgutil import walk_packages from inspect import getsource import mne from mne.utils import run_tests_if_main from mne.fixes import _get_args public_modules = [ # the list of modules users need to access for all functionality 'mne', 'mne.beamformer', 'mne.connectivity', 'mne.datasets', 'mne.datasets.megsim', 'mne.datasets.sample', 'mne.datasets.spm_face', 'mne.decoding', 'mne.filter', 'mne.gui', 'mne.inverse_sparse', 'mne.io', 'mne.io.kit', 'mne.minimum_norm', 'mne.preprocessing', 'mne.realtime', 'mne.report', 'mne.simulation', 'mne.source_estimate', 'mne.source_space', 'mne.stats', 'mne.time_frequency', 'mne.viz', ] docscrape_path = op.join(op.dirname(__file__), '..', '..', 'doc', 'sphinxext', 'numpy_ext', 'docscrape.py') if op.isfile(docscrape_path): docscrape = imp.load_source('docscrape', docscrape_path) else: docscrape = None def get_name(func): parts = [] module = inspect.getmodule(func) if module: parts.append(module.__name__) if hasattr(func, 'im_class'):
parts.append(func.im_class.__name__) parts.append(func.__name__) return '.'.join(parts) # functions to ignore args / docstring of _docstring_ignores = [ 'mne.io.write', # always ignore these 'mne.fixes._in1d', # fix function 'mne.epochs.average_movements', # deprecated pos param ] _tab_ignores = [ 'mne.channels.tests.test_montage', # demo da
ta has a tab ] def check_parameters_match(func, doc=None): """Helper to check docstring, returns list of incorrect results""" incorrect = [] name_ = get_name(func) if not name_.startswith('mne.') or name_.startswith('mne.externals'): return incorrect if inspect.isdatadescriptor(func): return incorrect args = _get_args(func) # drop self if len(args) > 0 and args[0] == 'self': args = args[1:] if doc is None: with warnings.catch_warnings(record=True) as w: doc = docscrape.FunctionDoc(func) if len(w): raise RuntimeError('Error for %s:\n%s' % (name_, w[0])) # check set param_names = [name for name, _, _ in doc['Parameters']] # clean up some docscrape output: param_names = [name.split(':')[0].strip('` ') for name in param_names] param_names = [name for name in param_names if '*' not in name] if len(param_names) != len(args): bad = str(sorted(list(set(param_names) - set(args)) + list(set(args) - set(param_names)))) if not any(d in name_ for d in _docstring_ignores) and \ 'deprecation_wrapped' not in func.__code__.co_name: incorrect += [name_ + ' arg mismatch: ' + bad] else: for n1, n2 in zip(param_names, args): if n1 != n2: incorrect += [name_ + ' ' + n1 + ' != ' + n2] return incorrect def test_docstring_parameters(): """Test module docsting formatting""" if docscrape is None: raise SkipTest('This must be run from the mne-python source directory') incorrect = [] for name in public_modules: module = __import__(name, globals()) for submod in name.split('.')[1:]: module = getattr(module, submod) classes = inspect.getmembers(module, inspect.isclass) for cname, cls in classes: if cname.startswith('_'): continue with warnings.catch_warnings(record=True) as w: cdoc = docscrape.ClassDoc(cls) if len(w): raise RuntimeError('Error for __init__ of %s in %s:\n%s' % (cls, name, w[0])) if hasattr(cls, '__init__'): incorrect += check_parameters_match(cls.__init__, cdoc) for method_name in cdoc.methods: method = getattr(cls, method_name) incorrect += check_parameters_match(method) if hasattr(cls, '__call__'): incorrect += check_parameters_match(cls.__call__) functions = inspect.getmembers(module, inspect.isfunction) for fname, func in functions: if fname.startswith('_'): continue incorrect += check_parameters_match(func) msg = '\n' + '\n'.join(sorted(list(set(incorrect)))) if len(incorrect) > 0: raise AssertionError(msg) def test_tabs(): """Test that there are no tabs in our source files""" for importer, modname, ispkg in walk_packages(mne.__path__, prefix='mne.'): if not ispkg and modname not in _tab_ignores: # mod = importlib.import_module(modname) # not py26 compatible! __import__(modname) # because we don't import e.g. mne.tests w/mne mod = sys.modules[modname] source = getsource(mod) assert_true('\t' not in source, '"%s" has tabs, please remove them or add it to the' 'ignore list' % modname) run_tests_if_main()
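# Tiny illustration (not part of the test module above) of the kind of mismatch the
# docstring check is designed to catch: the docstring documents `n_jobs` while the
# signature names the argument `njobs`, so the per-position comparison would report
# something like "n_jobs != njobs".
def _bad_example(x, njobs=1):
    """Do something.

    Parameters
    ----------
    x : array
        The data.
    n_jobs : int
        Number of jobs to run in parallel.
    """
    return x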
import pandas as pd
import numpy as np
from tqdm import tqdm

from keras.models import Sequential
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.embeddings import Embedding
from keras.layers.recurrent import LSTM, GRU
from keras.layers.normalization import BatchNormalization
from keras.utils import np_utils
from keras.layers import Merge
from keras.layers import TimeDistributed, Lambda
from keras.layers import Convolution1D, GlobalMaxPooling1D
from keras.callbacks import ModelCheckpoint
from keras import backend as K
from keras.layers.advanced_activations import PReLU
from keras.preprocessing import sequence, text

training = True
training = False

data0 = pd.read_csv('../input/quora_duplicate_questions.tsv', sep='\t')
data = pd.read_csv("../input/test.csv")
if training:
    y = data0.is_duplicate.values

#%% Preprocessing: turn the raw text into matrices of word indices
# '''Class for vectorizing texts, or/and turning texts into sequences (=list of word
# indexes, where the word of rank i in the dataset (starting at 1) has index i). '''
tk = text.Tokenizer(num_words=200000)
max_len = 40

# The vocabulary is built on all of these question columns
tk.fit_on_texts(list(data.question1.astype('str').values) +
                list(data.question2.astype('str').values) +
                list(data0.question1.astype('str').values) +
                list(data0.question2.astype('str').values))

# Convert the input text into indices into that vocabulary
if training:
    x1 = tk.texts_to_sequences(data0.question1.values)
else:
    x1 = tk.texts_to_sequences(data.question1.values)

'''
Turn a list of index sequences into one matrix, one row per sample (i.e. per question),
with at most 40 words per sample; every question is clipped to 40 words. This is the input.
Transform a list of num_samples sequences (lists of scalars) into a
2D Numpy array of shape (num_samples, num_timesteps).
num_timesteps is either the maxlen argument if provided,
or the length of the longest sequence otherwise.
Sequences that are shorter than num_timesteps are padded with value at the end.
'''
x1 = sequence.pad_sequences(x1, maxlen=max_len)

if training:
    x2 = tk.texts_to_sequences(data0.question2.values.astype(str))
else:
    x2 = tk.texts_to_sequences(data.question2.values.astype(str))
x2 = sequence.pad_sequences(x2, maxlen=max_len)

#%%
'''
dictionary mapping words (str) to their rank/index (int).
Only set after fit_on_texts was called
'''
word_index = tk.word_index

'''
Converts a class vector (integers) to binary class matrix.
E.g. for use with categorical_crossentropy.
'''
#ytrain_enc = np_utils.to_categorical(y)

embeddings_index = {}
# Each line holds a word followed by its embedding vector, much like word2vec;
# every word is described by a 300-dimensional vector. Trained on 840B tokens.
f = open('../input/glove.840B.300d.txt', encoding='utf-8')
for line in tqdm(f):
    values = line.strip().split(r' ')
    word = values[0]
    coefs = np.asarray(values[1:], dtype='float32')
    embeddings_index[word] = coefs
f.close()
print('Found %s word vectors.' % len(embeddings_index))

# Map the vocabulary of the Quora data onto a GloVe embedding matrix
embedding_matrix = np.zeros((len(word_index) + 1, 300))
for word, i in tqdm(word_index.items()):
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None:
        embedding_matrix[i] = embedding_vector

max_features = 200000
filter_length = 5
nb_filter = 64
pool_length = 4

model = Sequential()
print('Build model...')

#%% Turn the index matrices into GloVe vectors; from here on every word is described
# by a 300-dimensional feature vector.
model1 = Sequential()
# Convert the incoming word indices into GloVe vectors, 40 words (one question) at a
# time, giving a 40x300 matrix -- effectively the features of one question, a bit like
# an image. It is purely a lookup whose key ingredient is the `weights` argument: the
# input is processed row by row, the vector for every index in a row is looked up, and
# those vectors are stacked into the output matrix. Note the output is a 3-D tensor
# whose first axis is the sample (question) index.
#Embedding layer can only be used as the first layer in a model.
# This lookup matrix presumably uses a lot of memory.
model1.add(Embedding(len(word_index) + 1,
                     300,
                     weights=[embedding_matrix],
                     input_length=40,
                     trainable=False,
                     name='md1'))
print("Embedding ok.")

'''thanks to TimeDistributed wrapper your layer could accept an input with a shape of
(sequence_len, d1, ..., dn) by applying a layer provided to
X[0,:,:,..,:], X[1,:,...,:], ..., X[len_of_sequence,:,...,:].'''
# Seen together with the embedding output this is easy to follow: the input is a 3-D
# tensor, but only the last two axes carry information, and TimeDistributed simply
# restricts the computation to those last two axes.
# First parameter set: a 300x300 dense matrix.
model1.add(TimeDistributed(Dense(300, activation='relu')))
#Wraps arbitrary expression as a Layer object.
# After the sum, every question is reduced to a single 300-dimensional vector.
model1.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
print("model1 ok.")

model2 = Sequential()
model2.add(Embedding(len(word_index) + 1,
                     300,
                     weights=[embedding_matrix],
                     input_length=40,
                     trainable=False,
                     name='md2'))
# Second parameter set: another 300x300 dense matrix.
model2.add(TimeDistributed(Dense(300, activation='relu')))
model2.add(Lambda(lambda x: K.sum(x, axis=1), output_shape=(300,)))
print("model2 ok.")

model3 = Sequential()
model3.add(Embedding(len(word_index) + 1,
                     300,
                     weights=[embedding_matrix],
                     input_length=40,
                     trainable=False,
                     name='md3'))
'''This layer creates a convolution kernel that is convolved with the layer input over
a single spatial (or temporal) dimension to produce a tensor of outputs. '''
# Not entirely clear to me.
# Input is a 40x300 matrix
# (batch_size, steps, input_dim) -> (batch_size, new_steps, filters)
model3.add(Convolution1D(nb_filter=nb_filter,
                         filter_length=filter_length,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1))
'''Dropout consists in randomly setting a fraction rate of input units to 0 at each
update during training time, which helps prevent overfitting.'''
model3.add(Dropout(0.2))
model3.add(Convolution1D(nb_filter=nb_filter,
                         filter_length=filter_length,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1))
model3.add(GlobalMaxPooling1D())
model3.add(Dropout(0.2))
model3.add(Dense(300))
model3.add(Dropout(0.2))
'''Normalize the activations of the previous layer at each batch, i.e. applies a
transformation that maintains the mean activation close to 0 and the activation
standard deviation close to 1.'''
# Accepts any shape; the output shape matches the input shape.
model3.add(BatchNormalization())
print("model3 ok.")

model4 = Sequential()
model4.add(Embedding(len(word_index) + 1,
                     300,
                     weights=[embedding_matrix],
                     input_length=40,
                     trainable=False,
                     name='md4'))
model4.add(Convolution1D(nb_filter=nb_filter,
                         filter_length=filter_length,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1))
model4.add(Dropout(0.2))
model4.add(Convolution1D(nb_filter=nb_filter,
                         filter_length=filter_length,
                         border_mode='valid',
                         activation='relu',
                         subsample_length=1))
#(batch_size, steps, features) -> (batch_size, downsampled_steps, features)
model4.add(GlobalMaxPooling1D())
model4.add(Dropout(0.2))
model4.add(Dense(300))
model4.add(Dropout(0.2))
model4.add(BatchNormalization())
print("model4 ok.")

model5 = Sequential()
model5.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2, name='md5'))
model5.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model5 ok.")

model6 = Sequential()
model6.add(Embedding(len(word_index) + 1, 300, input_length=40, dropout=0.2, name='md6'))
# The LSTM output is a 300-dimensional vector.
model6.add(LSTM(300, dropout_W=0.2, dropout_U=0.2))
print("model6 ok.")

merged_model = Sequential()
'''It takes as input a list of tensors, all of the same shape except for the
concatenation axis, and returns a single tensor, the concatenation of all inputs.'''
merged_model.add(Merge([model1, model2, model3, model4, model5, model6], mode='concat'))
print("merge ok.")
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
merged_model.add(BatchNormalization())
merged_model.add(Dense(300))
merged_model.add(PReLU())
merged_model.add(Dropout(0.2))
m
#!/usr/bin/env python
# coding=utf-8
"""288. An enormous factorial

https://projecteuler.net/problem=288

For any prime p the number N(p, q) is defined by

    N(p, q) = sum_{n=0..q} T_n * p^n

with T_n generated by the following random number generator:

    S_0     = 290797
    S_{n+1} = S_n^2 mod 50515093
    T_n     = S_n mod p

Let Nfac(p, q) be the factorial of N(p, q).
Let NF(p, q) be the number of factors p in Nfac(p, q).

You are given that NF(3, 10^4) mod 3^20 = 624955285.

Find NF(61, 10^7) mod 61^10.
"""
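# A small, self-contained sketch (not a full solution to the problem above): build
# N(p, q) from the stated generator for a tiny q and check that Legendre's formula for
# the exponent of p in N! matches the closed form (N - sum(T_n)) / (p - 1), which holds
# because the digits of N(p, q) in base p are exactly the T_n.
def legendre_exponent(n, p):
    """Number of factors p in n!, by Legendre's formula."""
    total, power = 0, p
    while power <= n:
        total += n // power
        power *= p
    return total


def nf_small(p, q):
    s = 290797
    n_value = digit_sum = 0
    for n in range(q + 1):
        t = s % p
        n_value += t * p ** n
        digit_sum += t
        s = (s * s) % 50515093
    closed_form = (n_value - digit_sum) // (p - 1)
    assert legendre_exponent(n_value, p) == closed_form
    return closed_form


print(nf_small(3, 10))  # sanity run only; the real task asks for NF(61, 10^7) mod 61^10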