| _id (string, 98–184 chars) | text (string, 91–10.9k chars) |
|---|---|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L72-L90
|
def create_bucket(self, bucket_name, region_name=None):
"""
"""
s3_conn = self.get_conn()
if not region_name:
region_name = s3_conn.meta.region_name
if region_name == 'us-east-1':
self.get_conn().create_bucket(Bucket=bucket_name)
else:
self.get_conn().create_bucket(Bucket=bucket_name,
CreateBucketConfiguration={
'LocationConstraint': region_name
})
|
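For orientation, a minimal boto3 sketch of the same region handling the hook performs above; it assumes boto3 is installed, AWS credentials are configured, and the bucket names are hypothetical.

```python
import boto3

s3 = boto3.client('s3')

# us-east-1 is the one region where CreateBucketConfiguration must be omitted,
# which is why the hook special-cases it.
s3.create_bucket(Bucket='my-new-bucket')  # hypothetical bucket name

# Any other region must be passed as a location constraint.
s3.create_bucket(
    Bucket='my-eu-bucket',
    CreateBucketConfiguration={'LocationConstraint': 'eu-west-1'},
)
```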
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/pig_hook.py#L45-L87
|
def run_cli(self, pig, verbose=True):
"""
"""
with TemporaryDirectory(prefix='airflow_pigop_') as tmp_dir:
with NamedTemporaryFile(dir=tmp_dir) as f:
f.write(pig.encode('utf-8'))
f.flush()
fname = f.name
pig_bin = 'pig'
cmd_extra = []
pig_cmd = [pig_bin, '-f', fname] + cmd_extra
if self.pig_properties:
pig_properties_list = self.pig_properties.split()
pig_cmd.extend(pig_properties_list)
if verbose:
self.log.info("%s", " ".join(pig_cmd))
sp = subprocess.Popen(
pig_cmd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
cwd=tmp_dir,
close_fds=True)
self.sp = sp
stdout = ''
for line in iter(sp.stdout.readline, b''):
stdout += line.decode('utf-8')
if verbose:
self.log.info(line.strip())
sp.wait()
if sp.returncode:
raise AirflowException(stdout)
return stdout
|
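The interesting part of `run_cli` is the stream-and-collect subprocess pattern. A standalone sketch of just that pattern, using `echo` in place of the `pig` binary (Unix-only assumption):

```python
import subprocess

# Read stdout line by line so it could be logged while the process is still
# running, then collect the full output and check the return code.
sp = subprocess.Popen(['echo', 'hello'],
                      stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT,
                      close_fds=True)
stdout = ''
for line in iter(sp.stdout.readline, b''):
    stdout += line.decode('utf-8')
sp.wait()
print(sp.returncode, stdout.strip())  # 0 hello
```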
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/youtube.py#L135-L143
|
def get_vid_from_url(url):
"""
"""
return match1(url, r'youtu\.be/([^?/]+)') or \
match1(url, r'youtube\.com/embed/([^/?]+)') or \
match1(url, r'youtube\.com/v/([^/?]+)') or \
match1(url, r'youtube\.com/watch/([^/?]+)') or \
parse_query_param(url, 'v') or \
parse_query_param(parse_query_param(url, 'u'), 'v')
|
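`match1` and `parse_query_param` are you-get helpers (roughly `re.search` returning the first capture group, and a query-string lookup). A standard-library-only sketch of the same cascade:

```python
import re
from urllib.parse import urlparse, parse_qs

def video_id(url):
    for pattern in (r'youtu\.be/([^?/]+)',
                    r'youtube\.com/embed/([^/?]+)',
                    r'youtube\.com/v/([^/?]+)',
                    r'youtube\.com/watch/([^/?]+)'):
        m = re.search(pattern, url)
        if m:
            return m.group(1)
    # Fall back to the ?v= query parameter, as the original does.
    return parse_qs(urlparse(url).query).get('v', [None])[0]

print(video_id('https://www.youtube.com/watch?v=dQw4w9WgXcQ'))  # dQw4w9WgXcQ
print(video_id('https://youtu.be/dQw4w9WgXcQ'))                 # dQw4w9WgXcQ
```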
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L946-L1020
|
def _log_file_processing_stats(self, known_file_paths):
"""
"""
# File Path: Path to the file containing the DAG definition
# PID: PID associated with the process that's processing the file. May
# be empty.
# Runtime: If the process is currently running, how long it's been
# running for in seconds.
# Last Runtime: If the process ran before, how long did it take to
# finish in seconds
# Last Run: When the file finished processing in the previous run.
headers = ["File Path",
"PID",
"Runtime",
"Last Runtime",
"Last Run"]
rows = []
for file_path in known_file_paths:
last_runtime = self.get_last_runtime(file_path)
file_name = os.path.basename(file_path)
file_name = os.path.splitext(file_name)[0].replace(os.sep, '.')
if last_runtime:
Stats.gauge(
'dag_processing.last_runtime.{}'.format(file_name),
last_runtime
)
processor_pid = self.get_pid(file_path)
processor_start_time = self.get_start_time(file_path)
runtime = ((timezone.utcnow() - processor_start_time).total_seconds()
if processor_start_time else None)
last_run = self.get_last_finish_time(file_path)
if last_run:
seconds_ago = (timezone.utcnow() - last_run).total_seconds()
Stats.gauge(
'dag_processing.last_run.seconds_ago.{}'.format(file_name),
seconds_ago
)
rows.append((file_path,
processor_pid,
runtime,
last_runtime,
last_run))
# Sort by longest last runtime. (Can't sort None values in python3)
rows = sorted(rows, key=lambda x: x[3] or 0.0)
formatted_rows = []
for file_path, pid, runtime, last_runtime, last_run in rows:
formatted_rows.append((file_path,
pid,
"{:.2f}s".format(runtime)
if runtime else None,
"{:.2f}s".format(last_runtime)
if last_runtime else None,
last_run.strftime("%Y-%m-%dT%H:%M:%S")
if last_run else None))
log_str = ("\n" +
"=" * 80 +
"\n" +
"DAG File Processing Stats\n\n" +
tabulate(formatted_rows, headers=headers) +
"\n" +
"=" * 80)
self.log.info(log_str)
|
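The human-readable report comes from the third-party `tabulate` package. A quick illustration with made-up rows in the same shape the method builds:

```python
from tabulate import tabulate  # third-party dependency

headers = ["File Path", "PID", "Runtime", "Last Runtime", "Last Run"]
rows = [("dags/etl.py", 4242, "1.25s", "0.98s", "2019-05-01T10:00:00"),
        ("dags/reporting.py", None, None, "3.40s", "2019-05-01T09:58:12")]
print(tabulate(rows, headers=headers))
```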
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_container_instance_hook.py#L95-L110
|
def get_state_exitcode_details(self, resource_group, name):
"""
"""
current_state = self._get_instance_view(resource_group, name).current_state
return (current_state.state,
current_state.exit_code,
current_state.detail_status)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/imap_hook.py#L68-L102
|
def retrieve_mail_attachments(self,
name,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
return mail_attachments
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/bokecc.py#L17-L39
|
def download_by_id(self, vid = '', title = None, output_dir='.', merge=True, info_only=False,**kwargs):
""""""
assert vid
self.prepare(vid = vid, title = title, **kwargs)
self.extract(**kwargs)
self.download(output_dir = output_dir,
merge = merge,
info_only = info_only, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_function_hook.py#L109-L127
|
def update_function(self, name, body, update_mask):
"""
"""
response = self.get_conn().projects().locations().functions().patch(
updateMask=",".join(update_mask),
name=name,
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/configuration.py#L297-L307
|
def remove_option(self, section, option, remove_default=True):
"""
"""
if super().has_option(section, option):
super().remove_option(section, option)
if self.airflow_defaults.has_option(section, option) and remove_default:
self.airflow_defaults.remove_option(section, option)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/ftp_sensor.py#L69-L76
|
def _get_error_code(self, e):
""""""
try:
matches = self.error_code_pattern.match(str(e))
code = int(matches.group(0))
return code
except ValueError:
return e
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/bin/cli.py#L554-L563
|
def task_state(args):
"""
"""
dag = get_dag(args)
task = dag.get_task(task_id=args.task_id)
ti = TaskInstance(task, args.execution_date)
print(ti.current_state())
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/gcp_container_operator.py#L310-L322
|
def _get_field(self, extras, field, default=None):
"""
"""
long_f = 'extra__google_cloud_platform__{}'.format(field)
if long_f in extras:
return extras[long_f]
else:
self.log.info('Field %s not found in extras.', field)
return default
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/macros/hive.py#L23-L55
|
def max_partition(
table, schema="default", field=None, filter_map=None,
metastore_conn_id='metastore_default'):
"""
"""
from airflow.hooks.hive_hooks import HiveMetastoreHook
if '.' in table:
schema, table = table.split('.')
hh = HiveMetastoreHook(metastore_conn_id=metastore_conn_id)
return hh.max_partition(
schema=schema, table_name=table, field=field, filter_map=filter_map)
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/mgtv.py#L27-L33
|
def get_vid_from_url(url):
"""
"""
vid = match1(url, 'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html')
if not vid:
vid = match1(url, 'https?://www.mgtv.com/hz/bdpz/\d+/(\d+).html')
return vid
|
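A quick check of the first pattern against a sample URL, assuming `match1` behaves roughly like `re.search` returning the first capture group:

```python
import re

url = 'https://www.mgtv.com/b/12345/67890.html'  # hypothetical URL
m = re.search(r'https?://www.mgtv.com/(?:b|l)/\d+/(\d+).html', url)
print(m.group(1) if m else None)  # 67890
```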
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/task_runner/cgroup_task_runner.py#L66-L88
|
def _create_cgroup(self, path):
"""
"""
node = trees.Tree().root
path_split = path.split(os.sep)
for path_element in path_split:
name_to_node = {x.name: x for x in node.children}
if path_element not in name_to_node:
self.log.debug("Creating cgroup %s in %s", path_element, node.path)
node = node.create_cgroup(path_element)
else:
self.log.debug(
"Not creating cgroup %s in %s since it already exists",
path_element, node.path
)
node = name_to_node[path_element]
return node
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L225-L252
|
def store_file(self, remote_full_path, local_full_path_or_buffer):
"""
"""
conn = self.get_conn()
is_path = isinstance(local_full_path_or_buffer, basestring)
if is_path:
input_handle = open(local_full_path_or_buffer, 'rb')
else:
input_handle = local_full_path_or_buffer
remote_path, remote_file_name = os.path.split(remote_full_path)
conn.cwd(remote_path)
conn.storbinary('STOR %s' % remote_file_name, input_handle)
if is_path:
input_handle.close()
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/processor/ffmpeg.py#L220-L262
|
def ffmpeg_download_stream(files, title, ext, params={}, output_dir='.', stream=True):
"""
"""
output = title + '.' + ext
if not (output_dir == '.'):
output = output_dir + '/' + output
print('Downloading streaming content with FFmpeg, press q to stop recording...')
if stream:
ffmpeg_params = [FFMPEG] + ['-y', '-re', '-i']
else:
ffmpeg_params = [FFMPEG] + ['-y', '-i']
ffmpeg_params.append(files) #not the same here!!!!
if FFMPEG == 'avconv': #who cares?
ffmpeg_params += ['-c', 'copy', output]
else:
ffmpeg_params += ['-c', 'copy', '-bsf:a', 'aac_adtstoasc']
if params is not None:
if len(params) > 0:
for k, v in params:
ffmpeg_params.append(k)
ffmpeg_params.append(v)
ffmpeg_params.append(output)
print(' '.join(ffmpeg_params))
try:
a = subprocess.Popen(ffmpeg_params, stdin= subprocess.PIPE)
a.communicate()
except KeyboardInterrupt:
try:
a.stdin.write('q'.encode('utf-8'))
except:
pass
return True
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L274-L288
|
def get_mod_time(self, path):
"""
"""
conn = self.get_conn()
ftp_mdtm = conn.sendcmd('MDTM ' + path)
time_val = ftp_mdtm[4:]
# time_val optionally has microseconds
try:
return datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
except ValueError:
return datetime.datetime.strptime(time_val, '%Y%m%d%H%M%S')
|
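A small standalone check of the parsing step: an FTP MDTM reply looks like `213 20190501120501`, optionally with a fractional-second suffix, so the hook strips the 4-character status prefix and tries the more precise format first.

```python
import datetime

for time_val in ("20190501120501", "20190501120501.123"):
    try:
        ts = datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S.%f")
    except ValueError:
        ts = datetime.datetime.strptime(time_val, "%Y%m%d%H%M%S")
    print(ts)
```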
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L825-L943
|
def _run_raw_task(
self,
mark_success=False,
test_mode=False,
job_id=None,
pool=None,
session=None):
"""
"""
task = self.task
self.pool = pool or task.pool
self.test_mode = test_mode
self.refresh_from_db(session=session)
self.job_id = job_id
self.hostname = get_hostname()
self.operator = task.__class__.__name__
context = {}
actual_start_date = timezone.utcnow()
try:
if not mark_success:
context = self.get_template_context()
task_copy = copy.copy(task)
self.task = task_copy
def signal_handler(signum, frame):
self.log.error("Received SIGTERM. Terminating subprocesses.")
task_copy.on_kill()
raise AirflowException("Task received SIGTERM signal")
signal.signal(signal.SIGTERM, signal_handler)
# Don't clear Xcom until the task is certain to execute
self.clear_xcom_data()
start_time = time.time()
self.render_templates()
task_copy.pre_execute(context=context)
# If a timeout is specified for the task, make it fail
# if it goes beyond
result = None
if task_copy.execution_timeout:
try:
with timeout(int(
task_copy.execution_timeout.total_seconds())):
result = task_copy.execute(context=context)
except AirflowTaskTimeout:
task_copy.on_kill()
raise
else:
result = task_copy.execute(context=context)
# If the task returns a result, push an XCom containing it
if task_copy.do_xcom_push and result is not None:
self.xcom_push(key=XCOM_RETURN_KEY, value=result)
task_copy.post_execute(context=context, result=result)
end_time = time.time()
duration = end_time - start_time
Stats.timing(
'dag.{dag_id}.{task_id}.duration'.format(
dag_id=task_copy.dag_id,
task_id=task_copy.task_id),
duration)
Stats.incr('operator_successes_{}'.format(
self.task.__class__.__name__), 1, 1)
Stats.incr('ti_successes')
self.refresh_from_db(lock_for_update=True)
self.state = State.SUCCESS
except AirflowSkipException:
self.refresh_from_db(lock_for_update=True)
self.state = State.SKIPPED
except AirflowRescheduleException as reschedule_exception:
self.refresh_from_db()
self._handle_reschedule(actual_start_date, reschedule_exception, test_mode, context)
return
except AirflowException as e:
self.refresh_from_db()
# for case when task is marked as success/failed externally
# current behavior doesn't hit the success callback
if self.state in {State.SUCCESS, State.FAILED}:
return
else:
self.handle_failure(e, test_mode, context)
raise
except (Exception, KeyboardInterrupt) as e:
self.handle_failure(e, test_mode, context)
raise
# Success callback
try:
if task.on_success_callback:
task.on_success_callback(context)
except Exception as e3:
self.log.error("Failed when executing success callback")
self.log.exception(e3)
# Recording SUCCESS
self.end_date = timezone.utcnow()
self.set_duration()
if not test_mode:
session.add(Log(self.state, self))
session.merge(self)
session.commit()
|
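The `execution_timeout` branch relies on Airflow's `timeout` context manager. A minimal, Unix-only stand-in (not the real implementation, which raises `AirflowTaskTimeout`) showing the idea:

```python
import signal
from contextlib import contextmanager

@contextmanager
def timeout(seconds):
    # Turn SIGALRM into an exception so a long-running execute() call is
    # interrupted and normal error handling (on_kill, handle_failure) runs.
    def _handler(signum, frame):
        raise TimeoutError("execution timed out")
    previous = signal.signal(signal.SIGALRM, _handler)
    signal.alarm(seconds)
    try:
        yield
    finally:
        signal.alarm(0)
        signal.signal(signal.SIGALRM, previous)

with timeout(2):
    pass  # task_copy.execute(context=context) would run here
```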
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L943-L959
|
def get_sqlproxy_runner(self):
"""
"""
if not self.use_proxy:
raise AirflowException("Proxy runner can only be retrieved in case of use_proxy = True")
return CloudSqlProxyRunner(
path_prefix=self.sql_proxy_unique_path,
instance_specification=self._get_sqlproxy_instance_specification(),
project_id=self.project_id,
sql_proxy_version=self.sql_proxy_version,
sql_proxy_binary_path=self.sql_proxy_binary_path
)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/file_task_handler.py#L82-L133
|
def _read(self, ti, try_number, metadata=None):
"""
"""
# Task instance here might be different from task instance when
# initializing the handler. Thus explicitly getting log location
# is needed to get correct log path.
log_relative_path = self._render_filename(ti, try_number)
location = os.path.join(self.local_base, log_relative_path)
log = ""
if os.path.exists(location):
try:
with open(location) as f:
log += "*** Reading local file: {}\n".format(location)
log += "".join(f.readlines())
except Exception as e:
log = "*** Failed to load local log file: {}\n".format(location)
log += "*** {}\n".format(str(e))
else:
url = os.path.join(
"http://{ti.hostname}:{worker_log_server_port}/log", log_relative_path
).format(
ti=ti,
worker_log_server_port=conf.get('celery', 'WORKER_LOG_SERVER_PORT')
)
log += "*** Log file does not exist: {}\n".format(location)
log += "*** Fetching from: {}\n".format(url)
try:
timeout = None # No timeout
try:
timeout = conf.getint('webserver', 'log_fetch_timeout_sec')
except (AirflowConfigException, ValueError):
pass
response = requests.get(url, timeout=timeout)
# Check if the resource was properly fetched
response.raise_for_status()
log += '\n' + response.text
except Exception as e:
log += "*** Failed to fetch log file from worker. {}\n".format(str(e))
return log, {'end_of_log': True}
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L193-L212
|
def load_stream(self, stream, share_name, directory_name, file_name, count, **kwargs):
"""
"""
self.connection.create_file_from_stream(share_name, directory_name,
file_name, stream, count, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_cosmos_hook.py#L208-L224
|
def insert_documents(self, documents, database_name=None, collection_name=None):
"""
"""
if documents is None:
raise AirflowBadRequest("You cannot insert empty documents")
created_documents = []
for single_document in documents:
created_documents.append(
self.get_conn().CreateItem(
get_collection_link(
self.__get_database_name(database_name),
self.__get_collection_name(collection_name)),
single_document))
return created_documents
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L1363-L1368
|
def init_run_context(self, raw=False):
"""
"""
self.raw = raw
self._set_context(self)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L233-L277
|
def select_key(self, key, bucket_name=None,
expression='SELECT * FROM S3Object',
expression_type='SQL',
input_serialization=None,
output_serialization=None):
"""
"""
if input_serialization is None:
input_serialization = {'CSV': {}}
if output_serialization is None:
output_serialization = {'CSV': {}}
if not bucket_name:
(bucket_name, key) = self.parse_s3_url(key)
response = self.get_conn().select_object_content(
Bucket=bucket_name,
Key=key,
Expression=expression,
ExpressionType=expression_type,
InputSerialization=input_serialization,
OutputSerialization=output_serialization)
return ''.join(event['Records']['Payload'].decode('utf-8')
for event in response['Payload']
if 'Records' in event)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_mlengine_hook.py#L244-L255
|
def create_model(self, project_id, model):
"""
"""
if not model['name']:
raise ValueError("Model name must be provided and "
"could not be an empty string")
project = 'projects/{}'.format(project_id)
request = self._mlengine.projects().models().create(
parent=project, body=model)
return request.execute()
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/bigthink.py#L22-L49
|
def get_streams_by_id(account_number, video_id):
"""
"""
endpoint = 'https://edge.api.brightcove.com/playback/v1/accounts/{account_number}/videos/{video_id}'.format(account_number = account_number, video_id = video_id)
fake_header_id = fake_headers
#is this somehow related to the time? Magic....
fake_header_id['Accept'] ='application/json;pk=BCpkADawqM1cc6wmJQC2tvoXZt4mrB7bFfi6zGt9QnOzprPZcGLE9OMGJwspQwKfuFYuCjAAJ53JdjI8zGFx1ll4rxhYJ255AXH1BQ10rnm34weknpfG-sippyQ'
html = get_content(endpoint, headers= fake_header_id)
html_json = json.loads(html)
link_list = []
for i in html_json['sources']:
if 'src' in i: #to avoid KeyError
if i['src'].startswith('https'):
link_list.append((str(i['height']), i['src']))
return link_list
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/util/log.py#L88-L92
|
def e(message, exit_code=None):
""""""
print_log(message, YELLOW, BOLD)
if exit_code is not None:
sys.exit(exit_code)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/logging_mixin.py#L92-L102
|
def write(self, message):
"""
"""
if not message.endswith("\n"):
self._buffer += message
else:
self._buffer += message
self.logger.log(self.level, self._buffer.rstrip())
self._buffer = str()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_function_hook.py#L89-L107
|
def create_new_function(self, location, body, project_id=None):
"""
"""
response = self.get_conn().projects().locations().functions().create(
location=self._full_location(project_id, location),
body=body
).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L285-L299
|
def parse_query_param(url, param):
"""
"""
try:
return parse.parse_qs(parse.urlparse(url).query)[param][0]
except:
return None
|
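A quick illustration of why the function is written this way: `parse_qs` returns a dict of lists (hence the `[0]`), and a missing parameter raises `KeyError`, which the bare `except` turns into `None`.

```python
from urllib.parse import urlparse, parse_qs

query = parse_qs(urlparse('https://example.com/watch?v=abc123&t=10s').query)
print(query['v'][0])      # abc123
print(query.get('list'))  # None -- the extractor would return None here too
```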
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L856-L898
|
def start_in_sync(self):
"""
"""
while True:
agent_signal = self._signal_conn.recv()
if agent_signal == DagParsingSignal.TERMINATE_MANAGER:
self.terminate()
break
elif agent_signal == DagParsingSignal.END_MANAGER:
self.end()
sys.exit(os.EX_OK)
elif agent_signal == DagParsingSignal.AGENT_HEARTBEAT:
self._refresh_dag_dir()
simple_dags = self.heartbeat()
for simple_dag in simple_dags:
self._result_queue.put(simple_dag)
self._print_stat()
all_files_processed = all(self.get_last_finish_time(x) is not None
for x in self.file_paths)
max_runs_reached = self.max_runs_reached()
dag_parsing_stat = DagParsingStat(self._file_paths,
self.get_all_pids(),
self.max_runs_reached(),
all_files_processed,
len(simple_dags))
self._stat_queue.put(dag_parsing_stat)
self.wait_until_finished()
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
if max_runs_reached:
self.log.info("Exiting dag parsing loop as all files "
"have been processed %s times", self._max_runs)
self._signal_conn.send(DagParsingSignal.MANAGER_DONE)
break
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/timezone.py#L131-L157
|
def make_naive(value, timezone=None):
"""
"""
if timezone is None:
timezone = TIMEZONE
# Emulate the behavior of astimezone() on Python < 3.6.
if is_naive(value):
raise ValueError("make_naive() cannot be applied to a naive datetime")
o = value.astimezone(timezone)
# cross library compatibility
naive = dt.datetime(o.year,
o.month,
o.day,
o.hour,
o.minute,
o.second,
o.microsecond)
return naive
|
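A standard-library sketch of the same conversion, using a fixed UTC-5 offset as a stand-in for Airflow's configured `TIMEZONE`: shift the aware datetime into the target zone, then rebuild it without `tzinfo`.

```python
import datetime as dt

aware = dt.datetime(2019, 5, 1, 12, 0, tzinfo=dt.timezone.utc)
target = dt.timezone(dt.timedelta(hours=-5))  # hypothetical local zone
o = aware.astimezone(target)
naive = dt.datetime(o.year, o.month, o.day, o.hour, o.minute,
                    o.second, o.microsecond)
print(naive)  # 2019-05-01 07:00:00
```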
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L489-L524
|
def get_metastore_client(self):
"""
"""
import hmsclient
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
ms = self.metastore_conn
auth_mechanism = ms.extra_dejson.get('authMechanism', 'NOSASL')
if configuration.conf.get('core', 'security') == 'kerberos':
auth_mechanism = ms.extra_dejson.get('authMechanism', 'GSSAPI')
kerberos_service_name = ms.extra_dejson.get('kerberos_service_name', 'hive')
socket = TSocket.TSocket(ms.host, ms.port)
if configuration.conf.get('core', 'security') == 'kerberos' \
and auth_mechanism == 'GSSAPI':
try:
import saslwrapper as sasl
except ImportError:
import sasl
def sasl_factory():
sasl_client = sasl.Client()
sasl_client.setAttr("host", ms.host)
sasl_client.setAttr("service", kerberos_service_name)
sasl_client.init()
return sasl_client
from thrift_sasl import TSaslClientTransport
transport = TSaslClientTransport(sasl_factory, "GSSAPI", socket)
else:
transport = TTransport.TBufferedTransport(socket)
protocol = TBinaryProtocol.TBinaryProtocol(transport)
return hmsclient.HMSClient(iprot=protocol)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/ti_deps/deps/trigger_rule_dep.py#L91-L224
|
def _evaluate_trigger_rule(
self,
ti,
successes,
skipped,
failed,
upstream_failed,
done,
flag_upstream_failed,
session):
"""
"""
TR = airflow.utils.trigger_rule.TriggerRule
task = ti.task
upstream = len(task.upstream_task_ids)
tr = task.trigger_rule
upstream_done = done >= upstream
upstream_tasks_state = {
"total": upstream, "successes": successes, "skipped": skipped,
"failed": failed, "upstream_failed": upstream_failed, "done": done
}
# TODO(aoen): Ideally each individual trigger rules would be its own class, but
# this isn't very feasible at the moment since the database queries need to be
# bundled together for efficiency.
# handling instant state assignment based on trigger rules
if flag_upstream_failed:
if tr == TR.ALL_SUCCESS:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ALL_FAILED:
if successes or skipped:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_SUCCESS:
if upstream_done and not successes:
ti.set_state(State.SKIPPED, session)
elif tr == TR.ONE_FAILED:
if upstream_done and not (failed or upstream_failed):
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_FAILED:
if upstream_failed or failed:
ti.set_state(State.UPSTREAM_FAILED, session)
elif skipped == upstream:
ti.set_state(State.SKIPPED, session)
elif tr == TR.NONE_SKIPPED:
if skipped:
ti.set_state(State.SKIPPED, session)
if tr == TR.ONE_SUCCESS:
if successes <= 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task success, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ONE_FAILED:
if not failed and not upstream_failed:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires one upstream "
"task failure, but none were found. "
"upstream_tasks_state={1}, upstream_task_ids={2}"
.format(tr, upstream_tasks_state, task.upstream_task_ids))
elif tr == TR.ALL_SUCCESS:
num_failures = upstream - successes
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_FAILED:
num_successes = upstream - failed - upstream_failed
if num_successes > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have failed, but found {1} non-failure(s). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_successes, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.ALL_DONE:
if not upstream_done:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have completed, but found {1} task(s) that "
"weren't done. upstream_tasks_state={2}, "
"upstream_task_ids={3}"
.format(tr, upstream_done, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_FAILED:
num_failures = upstream - successes - skipped
if num_failures > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to have succeeded or been skipped, but found {1} non-success(es). "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, num_failures, upstream_tasks_state,
task.upstream_task_ids))
elif tr == TR.NONE_SKIPPED:
if skipped > 0:
yield self._failing_status(
reason="Task's trigger rule '{0}' requires all upstream "
"tasks to not have been skipped, but found {1} task(s) skipped. "
"upstream_tasks_state={2}, upstream_task_ids={3}"
.format(tr, skipped, upstream_tasks_state,
task.upstream_task_ids))
else:
yield self._failing_status(
reason="No strategy to evaluate trigger rule '{0}'.".format(tr))
|
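The rule arithmetic is easiest to see with concrete counts. A worked example with hypothetical numbers: four upstream tasks, three succeeded, one skipped, none failed.

```python
upstream, successes, skipped, failed, upstream_failed, done = 4, 3, 1, 0, 0, 4

# ALL_SUCCESS: any non-success upstream blocks the task.
print(upstream - successes > 0)            # True  -> failing status yielded
# NONE_FAILED: skips are fine as long as nothing failed.
print(upstream - successes - skipped > 0)  # False -> dependency met
# ONE_SUCCESS: a single success is enough once upstream is done.
print(successes <= 0)                      # False -> dependency met
```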
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/cli.py#L36-L81
|
def action_logging(f):
"""
"""
@functools.wraps(f)
def wrapper(*args, **kwargs):
"""
An wrapper for cli functions. It assumes to have Namespace instance
at 1st positional argument
:param args: Positional argument. It assumes to have Namespace instance
at 1st positional argument
:param kwargs: A passthrough keyword argument
"""
assert args
assert isinstance(args[0], Namespace), \
"1st positional argument should be argparse.Namespace instance, " \
"but {}".format(args[0])
metrics = _build_metrics(f.__name__, args[0])
cli_action_loggers.on_pre_execution(**metrics)
try:
return f(*args, **kwargs)
except Exception as e:
metrics['error'] = e
raise
finally:
metrics['end_datetime'] = datetime.utcnow()
cli_action_loggers.on_post_execution(**metrics)
return wrapper
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/helpers.py#L131-L140
|
def reduce_in_chunks(fn, iterable, initializer, chunk_size=0):
"""
"""
if len(iterable) == 0:
return initializer
if chunk_size == 0:
chunk_size = len(iterable)
return reduce(fn, chunks(iterable, chunk_size), initializer)
|
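A small usage sketch; `chunks` below is a minimal stand-in for the helper of the same name in `airflow.utils.helpers`.

```python
from functools import reduce

def chunks(items, chunk_size):
    # Yield fixed-size slices of a list.
    for i in range(0, len(items), chunk_size):
        yield items[i:i + chunk_size]

# fn receives (accumulator, chunk); here we sum a list three items at a time.
print(reduce(lambda acc, chunk: acc + sum(chunk),
             chunks(list(range(10)), 3), 0))  # 45
```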
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/dag_processing.py#L275-L286
|
def correct_maybe_zipped(fileloc):
"""
"""
_, archive, filename = re.search(
r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
if archive and zipfile.is_zipfile(archive):
return archive
else:
return fileloc
|
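What the regex does, shown on two hypothetical POSIX paths: it splits `<archive>.zip/<inner path>` into the archive and the member path, and leaves the optional group as `None` for a plain `.py` file.

```python
import os
import re

for fileloc in ('/dags/bundle.zip/my_dag.py', '/dags/my_dag.py'):
    _, archive, filename = re.search(
        r'((.*\.zip){})?(.*)'.format(re.escape(os.sep)), fileloc).groups()
    print(archive, filename)
# /dags/bundle.zip my_dag.py
# None /dags/my_dag.py
```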
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/sensors/redis_pub_sub_sensor.py#L50-L73
|
def poke(self, context):
"""
"""
self.log.info('RedisPubSubSensor checking for message on channels: %s', self.channels)
message = self.pubsub.get_message()
self.log.info('Message %s from channel %s', message, self.channels)
# Process only message types
if message and message['type'] == 'message':
context['ti'].xcom_push(key='message', value=message)
self.pubsub.unsubscribe(self.channels)
return True
return False
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_transfer_hook.py#L265-L298
|
def list_transfer_operations(self, filter):
"""
"""
conn = self.get_conn()
filter = self._inject_project_id(filter, FILTER, FILTER_PROJECT_ID)
operations = []
request = conn.transferOperations().list(name=TRANSFER_OPERATIONS, filter=json.dumps(filter))
while request is not None:
response = request.execute(num_retries=self.num_retries)
if OPERATIONS in response:
operations.extend(response[OPERATIONS])
request = conn.transferOperations().list_next(
previous_request=request, previous_response=response
)
return operations
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_function_hook.py#L161-L172
|
def delete_function(self, name):
"""
"""
response = self.get_conn().projects().locations().functions().delete(
name=name).execute(num_retries=self.num_retries)
operation_name = response["name"]
self._wait_for_operation_to_complete(operation_name=operation_name)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/presto_hook.py#L129-L140
|
def insert_rows(self, table, rows, target_fields=None):
"""
"""
super().insert_rows(table, rows, target_fields, 0)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L353-L375
|
def delete_reference_image(
self,
location,
product_id,
reference_image_id,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
"""
client = self.get_conn()
self.log.info('Deleting ReferenceImage')
name = ProductSearchClient.reference_image_path(
project=project_id, location=location, product=product_id, reference_image=reference_image_id
)
response = client.delete_reference_image(name=name, retry=retry, timeout=timeout, metadata=metadata)
self.log.info('ReferenceImage with the name [%s] deleted.', name)
return MessageToDict(response)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L374-L457
|
def load_file(
self,
filepath,
table,
delimiter=",",
field_dict=None,
create=True,
overwrite=True,
partition=None,
recreate=False,
tblproperties=None):
"""
"""
hql = ''
if recreate:
hql += "DROP TABLE IF EXISTS {table};\n".format(table=table)
if create or recreate:
if field_dict is None:
raise ValueError("Must provide a field dict when creating a table")
fields = ",\n ".join(
[k + ' ' + v for k, v in field_dict.items()])
hql += "CREATE TABLE IF NOT EXISTS {table} (\n{fields})\n".format(
table=table, fields=fields)
if partition:
pfields = ",\n ".join(
[p + " STRING" for p in partition])
hql += "PARTITIONED BY ({pfields})\n".format(pfields=pfields)
hql += "ROW FORMAT DELIMITED\n"
hql += "FIELDS TERMINATED BY '{delimiter}'\n".format(delimiter=delimiter)
hql += "STORED AS textfile\n"
if tblproperties is not None:
tprops = ", ".join(
["'{0}'='{1}'".format(k, v) for k, v in tblproperties.items()])
hql += "TBLPROPERTIES({tprops})\n".format(tprops=tprops)
hql += ";"
self.log.info(hql)
self.run_cli(hql)
hql = "LOAD DATA LOCAL INPATH '{filepath}' ".format(filepath=filepath)
if overwrite:
hql += "OVERWRITE "
hql += "INTO TABLE {table} ".format(table=table)
if partition:
pvals = ", ".join(
["{0}='{1}'".format(k, v) for k, v in partition.items()])
hql += "PARTITION ({pvals})".format(pvals=pvals)
# As a workaround for HIVE-10541, add a newline character
# at the end of hql (AIRFLOW-2412).
hql += ';\n'
self.log.info(hql)
self.run_cli(hql)
|
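Roughly, for a call such as `load_file('/tmp/events.csv', 'mydb.events', field_dict={'id': 'STRING', 'payload': 'STRING'}, partition={'ds': '2019-05-01'})` (all names hypothetical), the hook runs two HQL statements shaped like the following sketch:

```python
create_hql = (
    "CREATE TABLE IF NOT EXISTS mydb.events (\n"
    "    id STRING,\n"
    "    payload STRING)\n"
    "PARTITIONED BY (ds STRING)\n"
    "ROW FORMAT DELIMITED\n"
    "FIELDS TERMINATED BY ','\n"
    "STORED AS textfile\n"
    ";"
)
load_hql = (
    "LOAD DATA LOCAL INPATH '/tmp/events.csv' OVERWRITE "
    "INTO TABLE mydb.events PARTITION (ds='2019-05-01');\n"
)
```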
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datadog_hook.py#L88-L111
|
def query_metric(self,
query,
from_seconds_ago,
to_seconds_ago):
"""
"""
now = int(time.time())
response = api.Metric.query(
start=now - from_seconds_ago,
end=now - to_seconds_ago,
query=query)
self.validate_response(response)
return response
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L59-L107
|
def clear_task_instances(tis,
session,
activate_dag_runs=True,
dag=None,
):
"""
"""
job_ids = []
for ti in tis:
if ti.state == State.RUNNING:
if ti.job_id:
ti.state = State.SHUTDOWN
job_ids.append(ti.job_id)
else:
task_id = ti.task_id
if dag and dag.has_task(task_id):
task = dag.get_task(task_id)
task_retries = task.retries
ti.max_tries = ti.try_number + task_retries - 1
else:
# Ignore errors when updating max_tries if dag is None or
# task not found in dag since database records could be
# outdated. We make max_tries the maximum value of its
# original max_tries or the current task try number.
ti.max_tries = max(ti.max_tries, ti.try_number - 1)
ti.state = State.NONE
session.merge(ti)
if job_ids:
from airflow.jobs import BaseJob as BJ
for job in session.query(BJ).filter(BJ.id.in_(job_ids)).all():
job.state = State.SHUTDOWN
if activate_dag_runs and tis:
from airflow.models.dagrun import DagRun # Avoid circular import
drs = session.query(DagRun).filter(
DagRun.dag_id.in_({ti.dag_id for ti in tis}),
DagRun.execution_date.in_({ti.execution_date for ti in tis}),
).all()
for dr in drs:
dr.state = State.RUNNING
dr.start_date = timezone.utcnow()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/api/common/experimental/get_task_instance.py#L25-L55
|
def get_task_instance(dag_id, task_id, execution_date):
""""""
dagbag = DagBag()
# Check DAG exists.
if dag_id not in dagbag.dags:
error_message = "Dag id {} not found".format(dag_id)
raise DagNotFound(error_message)
# Get DAG object and check Task Exists
dag = dagbag.get_dag(dag_id)
if not dag.has_task(task_id):
error_message = 'Task {} not found in dag {}'.format(task_id, dag_id)
raise TaskNotFound(error_message)
# Get DagRun object and check that it exists
dagrun = dag.get_dagrun(execution_date=execution_date)
if not dagrun:
error_message = ('Dag Run for date {} not found in dag {}'
.format(execution_date, dag_id))
raise DagRunNotFound(error_message)
# Get task instance object and check that it exists
task_instance = dagrun.get_task_instance(task_id)
if not task_instance:
error_message = ('Task {} instance for date {} not found'
.format(task_id, execution_date))
raise TaskInstanceNotFound(error_message)
return task_instance
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L145-L271
|
def process_file(self, filepath, only_if_updated=True, safe_mode=True):
"""
"""
from airflow.models.dag import DAG # Avoid circular import
found_dags = []
# if the source file no longer exists in the DB or in the filesystem,
# return an empty list
# todo: raise exception?
if filepath is None or not os.path.isfile(filepath):
return found_dags
try:
# This failed before in what may have been a git sync
# race condition
file_last_changed_on_disk = datetime.fromtimestamp(os.path.getmtime(filepath))
if only_if_updated \
and filepath in self.file_last_changed \
and file_last_changed_on_disk == self.file_last_changed[filepath]:
return found_dags
except Exception as e:
self.log.exception(e)
return found_dags
mods = []
is_zipfile = zipfile.is_zipfile(filepath)
if not is_zipfile:
if safe_mode:
with open(filepath, 'rb') as f:
content = f.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = file_last_changed_on_disk
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s assumed to contain no DAGs. Skipping.",
filepath)
return found_dags
self.log.debug("Importing %s", filepath)
org_mod_name, _ = os.path.splitext(os.path.split(filepath)[-1])
mod_name = ('unusual_prefix_' +
hashlib.sha1(filepath.encode('utf-8')).hexdigest() +
'_' + org_mod_name)
if mod_name in sys.modules:
del sys.modules[mod_name]
with timeout(configuration.conf.getint('core', "DAGBAG_IMPORT_TIMEOUT")):
try:
m = imp.load_source(mod_name, filepath)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
else:
zip_file = zipfile.ZipFile(filepath)
for mod in zip_file.infolist():
head, _ = os.path.split(mod.filename)
mod_name, ext = os.path.splitext(mod.filename)
if not head and (ext == '.py' or ext == '.pyc'):
if mod_name == '__init__':
self.log.warning("Found __init__.%s at root of %s", ext, filepath)
if safe_mode:
with zip_file.open(mod.filename) as zf:
self.log.debug("Reading %s from %s", mod.filename, filepath)
content = zf.read()
if not all([s in content for s in (b'DAG', b'airflow')]):
self.file_last_changed[filepath] = (
file_last_changed_on_disk)
# todo: create ignore list
# Don't want to spam user with skip messages
if not self.has_logged:
self.has_logged = True
self.log.info(
"File %s assumed to contain no DAGs. Skipping.",
filepath)
if mod_name in sys.modules:
del sys.modules[mod_name]
try:
sys.path.insert(0, filepath)
m = importlib.import_module(mod_name)
mods.append(m)
except Exception as e:
self.log.exception("Failed to import: %s", filepath)
self.import_errors[filepath] = str(e)
self.file_last_changed[filepath] = file_last_changed_on_disk
for m in mods:
for dag in list(m.__dict__.values()):
if isinstance(dag, DAG):
if not dag.full_filepath:
dag.full_filepath = filepath
if dag.fileloc != filepath and not is_zipfile:
dag.fileloc = filepath
try:
dag.is_subdag = False
self.bag_dag(dag, parent_dag=dag, root_dag=dag)
if isinstance(dag._schedule_interval, six.string_types):
croniter(dag._schedule_interval)
found_dags.append(dag)
found_dags += dag.subdags
except (CroniterBadCronError,
CroniterBadDateError,
CroniterNotAlphaError) as cron_e:
self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
self.import_errors[dag.full_filepath] = \
"Invalid Cron expression: " + str(cron_e)
self.file_last_changed[dag.full_filepath] = \
file_last_changed_on_disk
except AirflowDagCycleException as cycle_exception:
self.log.exception("Failed to bag_dag: %s", dag.full_filepath)
self.import_errors[dag.full_filepath] = str(cycle_exception)
self.file_last_changed[dag.full_filepath] = \
file_last_changed_on_disk
self.file_last_changed[filepath] = file_last_changed_on_disk
return found_dags
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_hook.py#L194-L205
|
def expand_role(self, role):
"""
"""
if '/' in role:
return role
else:
return self.get_client_type('iam').get_role(RoleName=role)['Role']['Arn']
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L47-L62
|
def check_for_directory(self, share_name, directory_name, **kwargs):
"""
"""
return self.connection.exists(share_name, directory_name,
**kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L144-L163
|
def update_one(self, mongo_collection, filter_doc, update_doc,
mongo_db=None, **kwargs):
"""
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
return collection.update_one(filter_doc, update_doc, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/security/kerberos.py#L100-L110
|
def detect_conf_var():
"""
"""
ticket_cache = configuration.conf.get('kerberos', 'ccache')
with open(ticket_cache, 'rb') as f:
# Note: this file is binary, so we check against a bytearray.
return b'X-CACHECONF:' in f.read()
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/ckplayer.py#L13-L39
|
def ckplayer_get_info_by_xml(ckinfo):
""""""
e = ET.XML(ckinfo)
video_dict = {'title': '',
#'duration': 0,
'links': [],
'size': 0,
'flashvars': '',}
dictified = dictify(e)['ckplayer']
if 'info' in dictified:
if '_text' in dictified['info'][0]['title'][0]: #title
video_dict['title'] = dictified['info'][0]['title'][0]['_text'].strip()
#if dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip(): #duration
#video_dict['title'] = dictify(e)['ckplayer']['info'][0]['title'][0]['_text'].strip()
if '_text' in dictified['video'][0]['size'][0]: #size exists for 1 piece
video_dict['size'] = sum([int(i['size'][0]['_text']) for i in dictified['video']])
if '_text' in dictified['video'][0]['file'][0]: #link exist
video_dict['links'] = [i['file'][0]['_text'].strip() for i in dictified['video']]
if '_text' in dictified['flashvars'][0]:
video_dict['flashvars'] = dictified['flashvars'][0]['_text'].strip()
return video_dict
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_bigtable_hook.py#L171-L196
|
def create_table(instance,
table_id,
initial_split_keys=None,
column_families=None):
"""
"""
if column_families is None:
column_families = {}
if initial_split_keys is None:
initial_split_keys = []
table = Table(table_id, instance)
table.create(initial_split_keys, column_families)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_data_lake_hook.py#L70-L104
|
def upload_file(self, local_path, remote_path, nthreads=64, overwrite=True,
buffersize=4194304, blocksize=4194304):
"""
"""
multithread.ADLUploader(self.connection,
lpath=local_path,
rpath=remote_path,
nthreads=nthreads,
overwrite=overwrite,
buffersize=buffersize,
blocksize=blocksize)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/wasb_task_handler.py#L154-L178
|
def wasb_write(self, log, remote_log_location, append=True):
"""
"""
if append and self.wasb_log_exists(remote_log_location):
old_log = self.wasb_read(remote_log_location)
log = '\n'.join([old_log, log]) if old_log else log
try:
self.hook.load_string(
log,
self.wasb_container,
remote_log_location,
)
except AzureHttpError:
self.log.exception('Could not write logs to %s',
remote_log_location)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L92-L107
|
def check_for_prefix(self, bucket_name, prefix, delimiter):
"""
"""
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
prefix_split = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)
previous_level = prefix_split[0]
plist = self.list_prefixes(bucket_name, previous_level, delimiter)
return False if plist is None else prefix in plist
|
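The helper reduces the prefix by one "directory" level, then lists that parent level to see whether the full prefix appears. The splitting step on a hypothetical prefix:

```python
import re

delimiter = '/'
prefix = 'data/2019/05'
prefix = prefix + delimiter if prefix[-1] != delimiter else prefix
previous_level = re.split(r'(\w+[{d}])$'.format(d=delimiter), prefix, 1)[0]
print(prefix, previous_level)  # data/2019/05/ data/2019/
```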
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L221-L229
|
def get_previous_dagrun(self, session=None):
""""""
return session.query(DagRun).filter(
DagRun.dag_id == self.dag_id,
DagRun.execution_date < self.execution_date
).order_by(
DagRun.execution_date.desc()
).first()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/utils/sendgrid.py#L33-L102
|
def send_email(to, subject, html_content, files=None, dryrun=False, cc=None,
bcc=None, mime_subtype='mixed', sandbox_mode=False, **kwargs):
"""
"""
if files is None:
files = []
mail = Mail()
from_email = kwargs.get('from_email') or os.environ.get('SENDGRID_MAIL_FROM')
from_name = kwargs.get('from_name') or os.environ.get('SENDGRID_MAIL_SENDER')
mail.from_email = Email(from_email, from_name)
mail.subject = subject
mail.mail_settings = MailSettings()
if sandbox_mode:
mail.mail_settings.sandbox_mode = SandBoxMode(enable=True)
# Add the recipient list of to emails.
personalization = Personalization()
to = get_email_address_list(to)
for to_address in to:
personalization.add_to(Email(to_address))
if cc:
cc = get_email_address_list(cc)
for cc_address in cc:
personalization.add_cc(Email(cc_address))
if bcc:
bcc = get_email_address_list(bcc)
for bcc_address in bcc:
personalization.add_bcc(Email(bcc_address))
# Add custom_args to personalization if present
pers_custom_args = kwargs.get('personalization_custom_args', None)
if isinstance(pers_custom_args, dict):
for key in pers_custom_args.keys():
personalization.add_custom_arg(CustomArg(key, pers_custom_args[key]))
mail.add_personalization(personalization)
mail.add_content(Content('text/html', html_content))
categories = kwargs.get('categories', [])
for cat in categories:
mail.add_category(Category(cat))
# Add email attachment.
for fname in files:
basename = os.path.basename(fname)
attachment = Attachment()
attachment.type = mimetypes.guess_type(basename)[0]
attachment.filename = basename
attachment.disposition = "attachment"
attachment.content_id = '<{0}>'.format(basename)
with open(fname, "rb") as f:
attachment.content = base64.b64encode(f.read()).decode('utf-8')
mail.add_attachment(attachment)
_post_sendgrid_mail(mail.get())
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/email.py#L53-L96
|
def send_email_smtp(to, subject, html_content, files=None,
dryrun=False, cc=None, bcc=None,
mime_subtype='mixed', mime_charset='utf-8',
**kwargs):
"""
"""
smtp_mail_from = configuration.conf.get('smtp', 'SMTP_MAIL_FROM')
to = get_email_address_list(to)
msg = MIMEMultipart(mime_subtype)
msg['Subject'] = subject
msg['From'] = smtp_mail_from
msg['To'] = ", ".join(to)
recipients = to
if cc:
cc = get_email_address_list(cc)
msg['CC'] = ", ".join(cc)
recipients = recipients + cc
if bcc:
# don't add bcc in header
bcc = get_email_address_list(bcc)
recipients = recipients + bcc
msg['Date'] = formatdate(localtime=True)
mime_text = MIMEText(html_content, 'html', mime_charset)
msg.attach(mime_text)
for fname in files or []:
basename = os.path.basename(fname)
with open(fname, "rb") as f:
part = MIMEApplication(
f.read(),
Name=basename
)
part['Content-Disposition'] = 'attachment; filename="%s"' % basename
part['Content-ID'] = '<%s>' % basename
msg.attach(part)
send_MIME_email(smtp_mail_from, recipients, msg, dryrun)
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/miomio.py#L41-L51
|
def sina_xml_to_url_list(xml_data):
"""
"""
rawurl = []
dom = parseString(xml_data)
for node in dom.getElementsByTagName('durl'):
url = node.getElementsByTagName('url')[0]
rawurl.append(url.childNodes[0].data)
return rawurl
|
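A toy `<durl>` document in the shape the extractor expects, to show the traversal:

```python
from xml.dom.minidom import parseString

xml_data = ("<result>"
            "<durl><url>http://example.com/a.flv</url></durl>"
            "<durl><url>http://example.com/b.flv</url></durl>"
            "</result>")
dom = parseString(xml_data)
urls = [node.getElementsByTagName('url')[0].childNodes[0].data
        for node in dom.getElementsByTagName('durl')]
print(urls)  # ['http://example.com/a.flv', 'http://example.com/b.flv']
```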
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_container_instance_hook.py#L160-L172
|
def exists(self, resource_group, name):
"""
"""
for container in self.connection.container_groups.list_by_resource_group(resource_group):
if container.name == name:
return True
return False
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/log/file_task_handler.py#L135-L168
|
def read(self, task_instance, try_number=None, metadata=None):
"""
"""
# Task instance increments its try number when it starts to run.
# So the log for a particular task try will only show up when
# try number gets incremented in DB, i.e logs produced the time
# after cli run and before try_number + 1 in DB will not be displayed.
if try_number is None:
next_try = task_instance.next_try_number
try_numbers = list(range(1, next_try))
elif try_number < 1:
logs = [
'Error fetching the logs. Try number {} is invalid.'.format(try_number),
]
return logs
else:
try_numbers = [try_number]
logs = [''] * len(try_numbers)
metadatas = [{}] * len(try_numbers)
for i, try_number in enumerate(try_numbers):
log, metadata = self._read(task_instance, try_number, metadata)
logs[i] += log
metadatas[i] = metadata
return logs, metadatas
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/aws_athena_operator.py#L93-L115
|
def on_kill(self):
"""
"""
if self.query_execution_id:
self.log.info('⚰️⚰️⚰️ Received a kill Signal. Time to Die')
self.log.info(
'Stopping Query with executionId - %s', self.query_execution_id
)
response = self.hook.stop_query(self.query_execution_id)
http_status_code = None
try:
http_status_code = response['ResponseMetadata']['HTTPStatusCode']
except Exception as ex:
self.log.error('Exception while cancelling query', ex)
finally:
if http_status_code is None or http_status_code != 200:
self.log.error('Unable to request query cancel on athena. Exiting')
else:
self.log.info(
'Polling Athena for query with id %s to reach final state', self.query_execution_id
)
self.hook.poll_query_status(self.query_execution_id)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/slack_webhook_hook.py#L99-L119
|
def _build_slack_message(self):
"""
"""
cmd = {}
if self.channel:
cmd['channel'] = self.channel
if self.username:
cmd['username'] = self.username
if self.icon_emoji:
cmd['icon_emoji'] = self.icon_emoji
if self.link_names:
cmd['link_names'] = 1
if self.attachments:
cmd['attachments'] = self.attachments
cmd['text'] = self.message
return json.dumps(cmd)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/azure_fileshare_hook.py#L155-L172
|
def load_file(self, file_path, share_name, directory_name, file_name, **kwargs):
"""
"""
self.connection.create_file_from_path(share_name, directory_name,
file_name, file_path, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L556-L576
|
def check_for_named_partition(self, schema, table, partition_name):
"""
"""
with self.metastore as client:
return client.check_for_named_partition(schema, table, partition_name)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/S3_hook.py#L220-L231
|
def read_key(self, key, bucket_name=None):
"""
"""
obj = self.get_key(key, bucket_name)
return obj.get()['Body'].read().decode('utf-8')
|
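The hook's `get_key` hands back a boto3 `Object`, so the read follows the same pattern as the last line above. A hedged sketch assuming boto3 is installed, credentials are configured, and the bucket/key are hypothetical:

```python
import boto3

obj = boto3.resource('s3').Object('my-bucket', 'path/to/key.txt')
text = obj.get()['Body'].read().decode('utf-8')
```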
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/utils/net.py#L25-L45
|
def get_hostname():
"""
"""
# First we attempt to fetch the callable path from the config.
try:
callable_path = conf.get('core', 'hostname_callable')
except AirflowConfigException:
callable_path = None
# Then we handle the case when the config is missing or empty. This is the
# default behavior.
if not callable_path:
return socket.getfqdn()
# Since we have a callable path, we try to import and run it next.
module_path, attr_name = callable_path.split(':')
module = importlib.import_module(module_path)
callable = getattr(module, attr_name)
return callable()
|
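The `hostname_callable` setting is a `module:attribute` string; splitting on `:` and importing yields the function to call. `'socket:getfqdn'` below mirrors the fallback used above.

```python
import importlib

module_path, attr_name = 'socket:getfqdn'.split(':')
hostname_fn = getattr(importlib.import_module(module_path), attr_name)
print(hostname_fn())
```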
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/mongo_hook.py#L114-L124
|
def find(self, mongo_collection, query, find_one=False, mongo_db=None, **kwargs):
"""
"""
collection = self.get_collection(mongo_collection, mongo_db=mongo_db)
if find_one:
return collection.find_one(query, **kwargs)
else:
return collection.find(query, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/imap_hook.py#L104-L141
|
def download_mail_attachments(self,
name,
local_output_directory,
mail_folder='INBOX',
check_regex=False,
latest_only=False,
not_found_mode='raise'):
"""
"""
mail_attachments = self._retrieve_mails_attachments_by_name(name,
mail_folder,
check_regex,
latest_only)
if not mail_attachments:
self._handle_not_found_mode(not_found_mode)
self._create_files(mail_attachments, local_output_directory)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L996-L1209
|
def run_load(self,
destination_project_dataset_table,
source_uris,
schema_fields=None,
source_format='CSV',
create_disposition='CREATE_IF_NEEDED',
skip_leading_rows=0,
write_disposition='WRITE_EMPTY',
field_delimiter=',',
max_bad_records=0,
quote_character=None,
ignore_unknown_values=False,
allow_quoted_newlines=False,
allow_jagged_rows=False,
schema_update_options=(),
src_fmt_configs=None,
time_partitioning=None,
cluster_fields=None,
autodetect=False):
"""
"""
# bigquery only allows certain source formats
# we check to make sure the passed source format is valid
# if it's not, we raise a ValueError
# Refer to this link for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.query.tableDefinitions.(key).sourceFormat
if schema_fields is None and not autodetect:
raise ValueError(
'You must either pass a schema or autodetect=True.')
if src_fmt_configs is None:
src_fmt_configs = {}
source_format = source_format.upper()
allowed_formats = [
"CSV", "NEWLINE_DELIMITED_JSON", "AVRO", "GOOGLE_SHEETS",
"DATASTORE_BACKUP", "PARQUET"
]
if source_format not in allowed_formats:
raise ValueError("{0} is not a valid source format. "
"Please use one of the following types: {1}"
.format(source_format, allowed_formats))
# bigquery also allows you to define how you want a table's schema to change
# as a side effect of a load
# for more details:
# https://cloud.google.com/bigquery/docs/reference/rest/v2/jobs#configuration.load.schemaUpdateOptions
allowed_schema_update_options = [
'ALLOW_FIELD_ADDITION', "ALLOW_FIELD_RELAXATION"
]
if not set(allowed_schema_update_options).issuperset(
set(schema_update_options)):
raise ValueError(
"{0} contains invalid schema update options."
"Please only use one or more of the following options: {1}"
.format(schema_update_options, allowed_schema_update_options))
destination_project, destination_dataset, destination_table = \
_split_tablename(table_input=destination_project_dataset_table,
default_project_id=self.project_id,
var_name='destination_project_dataset_table')
configuration = {
'load': {
'autodetect': autodetect,
'createDisposition': create_disposition,
'destinationTable': {
'projectId': destination_project,
'datasetId': destination_dataset,
'tableId': destination_table,
},
'sourceFormat': source_format,
'sourceUris': source_uris,
'writeDisposition': write_disposition,
'ignoreUnknownValues': ignore_unknown_values
}
}
time_partitioning = _cleanse_time_partitioning(
destination_project_dataset_table,
time_partitioning
)
if time_partitioning:
configuration['load'].update({
'timePartitioning': time_partitioning
})
if cluster_fields:
configuration['load'].update({'clustering': {'fields': cluster_fields}})
if schema_fields:
configuration['load']['schema'] = {'fields': schema_fields}
if schema_update_options:
if write_disposition not in ["WRITE_APPEND", "WRITE_TRUNCATE"]:
raise ValueError("schema_update_options is only "
"allowed if write_disposition is "
"'WRITE_APPEND' or 'WRITE_TRUNCATE'.")
else:
self.log.info(
"Adding experimental 'schemaUpdateOptions': %s",
schema_update_options
)
configuration['load'][
'schemaUpdateOptions'] = schema_update_options
if max_bad_records:
configuration['load']['maxBadRecords'] = max_bad_records
# if following fields are not specified in src_fmt_configs,
# honor the top-level params for backward-compatibility
if 'skipLeadingRows' not in src_fmt_configs:
src_fmt_configs['skipLeadingRows'] = skip_leading_rows
if 'fieldDelimiter' not in src_fmt_configs:
src_fmt_configs['fieldDelimiter'] = field_delimiter
if 'ignoreUnknownValues' not in src_fmt_configs:
src_fmt_configs['ignoreUnknownValues'] = ignore_unknown_values
if quote_character is not None:
src_fmt_configs['quote'] = quote_character
if allow_quoted_newlines:
src_fmt_configs['allowQuotedNewlines'] = allow_quoted_newlines
src_fmt_to_configs_mapping = {
'CSV': [
'allowJaggedRows', 'allowQuotedNewlines', 'autodetect',
'fieldDelimiter', 'skipLeadingRows', 'ignoreUnknownValues',
'nullMarker', 'quote'
],
'DATASTORE_BACKUP': ['projectionFields'],
'NEWLINE_DELIMITED_JSON': ['autodetect', 'ignoreUnknownValues'],
'PARQUET': ['autodetect', 'ignoreUnknownValues'],
'AVRO': ['useAvroLogicalTypes'],
}
valid_configs = src_fmt_to_configs_mapping[source_format]
src_fmt_configs = {
k: v
for k, v in src_fmt_configs.items() if k in valid_configs
}
configuration['load'].update(src_fmt_configs)
if allow_jagged_rows:
configuration['load']['allowJaggedRows'] = allow_jagged_rows
return self.run_with_configuration(configuration)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L169-L179
|
def set_dags_paused_state(is_paused):
"""
"""
session = settings.Session()
dms = session.query(DagModel).filter(
DagModel.dag_id.in_(DAG_IDS))
for dm in dms:
logging.info('Setting DAG :: {} is_paused={}'.format(dm, is_paused))
dm.is_paused = is_paused
session.commit()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/http_hook.py#L149-L181
|
def run_and_check(self, session, prepped_request, extra_options):
"""
"""
extra_options = extra_options or {}
try:
response = session.send(
prepped_request,
stream=extra_options.get("stream", False),
verify=extra_options.get("verify", True),
proxies=extra_options.get("proxies", {}),
cert=extra_options.get("cert"),
timeout=extra_options.get("timeout"),
allow_redirects=extra_options.get("allow_redirects", True))
if extra_options.get('check_response', True):
self.check_response(response)
return response
except requests.exceptions.ConnectionError as ex:
self.log.warning(str(ex) + ' Tenacity will retry to execute the operation')
raise ex
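A minimal usage sketch, assuming the standard HttpHook wrapper and a placeholder connection id; the extra_options keys map directly onto the session.send() keyword arguments read above.

from airflow.hooks.http_hook import HttpHook

hook = HttpHook(method='GET', http_conn_id='http_default')   # hypothetical connection id
response = hook.run(
    endpoint='/status',
    extra_options={
        'timeout': 30,            # forwarded to session.send(timeout=...)
        'verify': False,          # skip TLS certificate verification
        'check_response': False,  # skip the check_response() call above
    },
)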
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagrun.py#L392-L407
|
def get_run(session, dag_id, execution_date):
"""
"""
qry = session.query(DagRun).filter(
DagRun.dag_id == dag_id,
DagRun.external_trigger == False, # noqa
DagRun.execution_date == execution_date,
)
return qry.first()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/scripts/perf/scheduler_ops_metrics.py#L151-L166
|
def clear_dag_task_instances():
"""
"""
session = settings.Session()
TI = TaskInstance
tis = (
session
.query(TI)
.filter(TI.dag_id.in_(DAG_IDS))
.all()
)
for ti in tis:
logging.info('Deleting TaskInstance :: {}'.format(ti))
session.delete(ti)
session.commit()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_vision_hook.py#L379-L404
|
def add_product_to_product_set(
self,
product_set_id,
product_id,
location=None,
project_id=None,
retry=None,
timeout=None,
metadata=None,
):
"""
"""
client = self.get_conn()
product_name = ProductSearchClient.product_path(project_id, location, product_id)
product_set_name = ProductSearchClient.product_set_path(project_id, location, product_set_id)
self.log.info('Add Product[name=%s] to Product Set[name=%s]', product_name, product_set_name)
client.add_product_to_product_set(
name=product_set_name, product=product_name, retry=retry, timeout=timeout, metadata=metadata
)
self.log.info('Product added to Product Set')
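For reference, a sketch of the resource names produced by the two *_path helpers above; the project, location and ids are placeholders.

# What product_path / product_set_path return for placeholder arguments.
product_name = 'projects/my-project/locations/europe-west1/products/my-product-id'
product_set_name = 'projects/my-project/locations/europe-west1/productSets/my-product-set-id'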
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/variable.py#L76-L99
|
def setdefault(cls, key, default, deserialize_json=False):
"""
"""
obj = Variable.get(key, default_var=None,
deserialize_json=deserialize_json)
if obj is None:
if default is not None:
Variable.set(key, default, serialize_json=deserialize_json)
return default
else:
raise ValueError('Default Value must be set')
else:
return obj
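A short usage sketch (key and default are made up); like dict.setdefault, the default is written to the metastore on first use and returned unchanged afterwards.

from airflow.models import Variable

config = Variable.setdefault(
    'example_config',                    # hypothetical key
    {'batch_size': 100, 'retries': 3},   # stored as JSON if the key is absent
    deserialize_json=True,
)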
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_athena_hook.py#L43-L51
|
def get_conn(self):
"""
"""
if not self.conn:
self.conn = self.get_client_type('athena')
return self.conn
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/imap_attachment_to_s3_operator.py#L67-L88
|
def execute(self, context):
"""
"""
self.log.info(
'Transferring mail attachment %s from mail server via imap to s3 key %s...',
self.imap_attachment_name, self.s3_key
)
with ImapHook(imap_conn_id=self.imap_conn_id) as imap_hook:
imap_mail_attachments = imap_hook.retrieve_mail_attachments(
name=self.imap_attachment_name,
mail_folder=self.imap_mail_folder,
check_regex=self.imap_check_regex,
latest_only=True
)
s3_hook = S3Hook(aws_conn_id=self.s3_conn_id)
s3_hook.load_bytes(bytes_data=imap_mail_attachments[0][1], key=self.s3_key)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L419-L448
|
def create_x_axis(self, name, label=None, format=None, date=False, custom_format=False):
""""""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
if format == 'AM_PM':
axis['tickFormat'] = "function(d) { return get_am_pm(parseInt(d)); }"
else:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# date format : see https://github.com/mbostock/d3/wiki/Time-Formatting
if date:
self.dateformat = format
axis['tickFormat'] = ("function(d) { return d3.time.format('%s')"
"(new Date(parseInt(d))) }\n"
"" % self.dateformat)
# flag if the x axis is a date
if name[0] == 'x':
self.x_axis_date = True
# Add new axis to list of axis
self.axislist[name] = axis
# Create x2Axis if focus_enable
if name == "xAxis" and self.focus_enable:
self.axislist['x2Axis'] = axis
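As a sketch of the output, calling create_x_axis('xAxis', label='Date', format='%d %b %Y', date=True) on a chart would register roughly this entry in axislist (label and format are made up):

expected_axis = {
    'tickFormat': "function(d) { return d3.time.format('%d %b %Y')"
                  "(new Date(parseInt(d))) }\n",
    'axisLabel': "'Date'",
}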
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/bigquery_hook.py#L1948-L1959
|
def _bind_parameters(operation, parameters):
""" """
# inspired by MySQL Python Connector (conversion.py)
string_parameters = {}
for (name, value) in iteritems(parameters):
if value is None:
string_parameters[name] = 'NULL'
elif isinstance(value, basestring):
string_parameters[name] = "'" + _escape(value) + "'"
else:
string_parameters[name] = str(value)
return operation % string_parameters
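A worked sketch of the substitution (query and values are made up):

operation = "SELECT name FROM dataset.users WHERE age > %(age)s AND city = %(city)s"
parameters = {'age': 30, 'city': 'Paris'}
# string_parameters becomes {'age': '30', 'city': "'Paris'"} and the
# %-substitution returns:
#   "SELECT name FROM dataset.users WHERE age > 30 AND city = 'Paris'"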
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/vimeo.py#L22-L36
|
def vimeo_download_by_channel_id(channel_id, output_dir='.', merge=False, info_only=False, **kwargs):
""""""
html = get_content('https://api.vimeo.com/channels/{channel_id}/videos?access_token={access_token}'.format(channel_id=channel_id, access_token=access_token))
data = loads(html)
id_list = []
#print(data)
for i in data['data']:
id_list.append(match1(i['uri'], r'/videos/(\w+)'))
for id in id_list:
try:
vimeo_download_by_id(id, None, output_dir, merge, info_only, **kwargs)
except urllib.error.URLError as e:
log.w('{} failed with {}'.format(id, e))
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/sftp_hook.py#L189-L200
|
def store_file(self, remote_full_path, local_full_path):
"""
"""
conn = self.get_conn()
conn.put(local_full_path, remote_full_path)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/hooks/hive_hooks.py#L731-L745
|
def table_exists(self, table_name, db='default'):
"""
"""
try:
self.get_table(table_name, db)
return True
except Exception:
return False
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/ftp_hook.py#L28-L58
|
def mlsd(conn, path="", facts=None):
"""
"""
facts = facts or []
if facts:
conn.sendcmd("OPTS MLST " + ";".join(facts) + ";")
if path:
cmd = "MLSD %s" % path
else:
cmd = "MLSD"
lines = []
conn.retrlines(cmd, lines.append)
for line in lines:
facts_found, _, name = line.rstrip(ftplib.CRLF).partition(' ')
entry = {}
for fact in facts_found[:-1].split(";"):
key, _, value = fact.partition("=")
entry[key.lower()] = value
yield (name, entry)
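A minimal usage sketch with a placeholder host; the helper mirrors Python 3's ftplib.FTP.mlsd for a plain ftplib connection.

import ftplib

conn = ftplib.FTP('ftp.example.com')                  # placeholder host
conn.login()                                          # anonymous login
for name, facts in mlsd(conn, path='/pub', facts=['type', 'size', 'modify']):
    # facts is a dict such as {'type': 'file', 'size': '1024', 'modify': '20190101120000'}
    print(name, facts.get('type'), facts.get('size'))
conn.quit()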
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/dagbag.py#L398-L416
|
def dagbag_report(self):
""""""
report = textwrap.dedent("""\n
-------------------------------------------------------------------
DagBag loading stats for {dag_folder}
-------------------------------------------------------------------
Number of DAGs: {dag_num}
Total task number: {task_num}
DagBag parsing time: {duration}
{table}
""")
stats = self.dagbag_stats
return report.format(
dag_folder=self.dag_folder,
duration=sum([o.duration for o in stats]),
dag_num=sum([o.dag_num for o in stats]),
task_num=sum([o.task_num for o in stats]),
table=pprinttable(stats),
)
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/extractors/veoh.py#L8-L16
|
def veoh_download(url, output_dir = '.', merge = False, info_only = False, **kwargs):
'''Download a Veoh video from its page URL.'''
if re.match(r'http://www.veoh.com/watch/\w+', url):
item_id = match1(url, r'http://www.veoh.com/watch/(\w+)')
elif re.match(r'http://www.veoh.com/m/watch.php\?v=\.*', url):
item_id = match1(url, r'http://www.veoh.com/m/watch.php\?v=(\w+)')
else:
raise NotImplementedError('Cannot find item ID')
veoh_download_by_id(item_id, output_dir=output_dir, merge=merge, info_only=info_only, **kwargs)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datadog_hook.py#L113-L158
|
def post_event(self, title, text, aggregation_key=None, alert_type=None, date_happened=None,
handle=None, priority=None, related_event_id=None, tags=None, device_name=None):
"""
"""
response = api.Event.create(
title=title,
text=text,
aggregation_key=aggregation_key,
alert_type=alert_type,
date_happened=date_happened,
handle=handle,
priority=priority,
related_event_id=related_event_id,
tags=tags,
host=self.host,
device_name=device_name,
source_type_name=self.source_type_name)
self.validate_response(response)
return response
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/discord_webhook_operator.py#L85-L98
|
def execute(self, context):
"""
"""
self.hook = DiscordWebhookHook(
self.http_conn_id,
self.webhook_endpoint,
self.message,
self.username,
self.avatar_url,
self.tts,
self.proxy
)
self.hook.execute()
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/operators/mongo_to_s3.py#L106-L113
|
def _stringify(iterable, joinable='\n'):
"""
"""
return joinable.join(
[json.dumps(doc, default=json_util.default) for doc in iterable]
)
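A quick sketch of the output (documents are made up); each document becomes one JSON line.

import json
from bson import json_util

docs = [{'_id': 1, 'name': 'alpha'}, {'_id': 2, 'name': 'beta'}]
# Equivalent to the helper above with the default joinable='\n':
payload = '\n'.join(json.dumps(doc, default=json_util.default) for doc in docs)
# payload == '{"_id": 1, "name": "alpha"}\n{"_id": 2, "name": "beta"}'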
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/snowflake_hook.py#L107-L113
|
def get_conn(self):
"""
"""
conn_config = self._get_conn_params()
conn = snowflake.connector.connect(**conn_config)
return conn
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L350-L361
|
def buildcontent(self):
"""
"""
self.buildcontainer()
# if the subclass has a method buildjs this method will be
# called instead of the method defined here
# when this subclass method is entered it does call
# the method buildjschart defined here
self.buildjschart()
self.htmlcontent = self.template_content_nvd3.render(chart=self)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/_vendor/nvd3/NVD3Chart.py#L450-L465
|
def create_y_axis(self, name, label=None, format=None, custom_format=False):
"""
"""
axis = {}
if custom_format and format:
axis['tickFormat'] = format
elif format:
axis['tickFormat'] = "d3.format(',%s')" % format
if label:
axis['axisLabel'] = "'" + label + "'"
# Add new axis to list of axis
self.axislist[name] = axis
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_sql_hook.py#L97-L112
|
def get_instance(self, instance, project_id=None):
"""
"""
return self.get_conn().instances().get(
project=project_id,
instance=instance
).execute(num_retries=self.num_retries)
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/models/taskinstance.py#L614-L642
|
def next_retry_datetime(self):
"""
"""
delay = self.task.retry_delay
if self.task.retry_exponential_backoff:
min_backoff = int(delay.total_seconds() * (2 ** (self.try_number - 2)))
# deterministic per task instance
hash = int(hashlib.sha1("{}#{}#{}#{}".format(self.dag_id,
self.task_id,
self.execution_date,
self.try_number)
.encode('utf-8')).hexdigest(), 16)
# between 0.5 * delay * (2^retry_number) and 1.0 * delay * (2^retry_number)
modded_hash = min_backoff + hash % min_backoff
# timedelta has a maximum representable value. The exponentiation
# here means this value can be exceeded after a certain number
# of tries (around 50 if the initial delay is 1s, even fewer if
# the delay is larger). Cap the value here before creating a
# timedelta object so the operation doesn't fail.
delay_backoff_in_seconds = min(
modded_hash,
timedelta.max.total_seconds() - 1
)
delay = timedelta(seconds=delay_backoff_in_seconds)
if self.task.max_retry_delay:
delay = min(self.task.max_retry_delay, delay)
return self.end_date + delay
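To make the backoff arithmetic concrete (numbers are illustrative): with a retry_delay of 300 seconds and try_number == 3, min_backoff is 600 seconds and the hash-based jitter places the delay somewhere in the 600-1199 second range, before any max_retry_delay cap.

from datetime import timedelta

retry_delay = timedelta(seconds=300)
try_number = 3
min_backoff = int(retry_delay.total_seconds() * (2 ** (try_number - 2)))   # 600
# modded_hash = min_backoff + hash % min_backoff, i.e. 600..1199 seconds,
# deterministic for a given (dag_id, task_id, execution_date, try_number).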
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/aws_athena_hook.py#L109-L140
|
def poll_query_status(self, query_execution_id, max_tries=None):
"""
"""
try_number = 1
final_query_state = None # Query state when query reaches final state or max_tries reached
while True:
query_state = self.check_query_status(query_execution_id)
if query_state is None:
self.log.info('Trial {try_number}: Invalid query state. Retrying again'.format(
try_number=try_number))
elif query_state in self.INTERMEDIATE_STATES:
self.log.info('Trial {try_number}: Query is still in an intermediate state - {state}'
.format(try_number=try_number, state=query_state))
else:
self.log.info('Trial {try_number}: Query execution completed. Final state is {state}'
.format(try_number=try_number, state=query_state))
final_query_state = query_state
break
if max_tries and try_number >= max_tries: # Break loop if max_tries reached
final_query_state = query_state
break
try_number += 1
sleep(self.sleep_time)
return final_query_state
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/datadog_hook.py#L62-L86
|
def send_metric(self, metric_name, datapoint, tags=None, type_=None, interval=None):
"""
"""
response = api.Metric.send(
metric=metric_name,
points=datapoint,
host=self.host,
tags=tags,
type=type_,
interval=interval)
self.validate_response(response)
return response
|
https://github.com/apache/airflow/blob/b69c686ad8a0c89b9136bb4b31767257eb7b2597/airflow/contrib/hooks/gcp_translate_hook.py#L34-L43
|
def get_conn(self):
"""
"""
if not self._client:
self._client = Client(credentials=self._get_credentials())
return self._client
|
https://github.com/soimort/you-get/blob/b746ac01c9f39de94cac2d56f665285b0523b974/src/you_get/common.py#L415-L454
|
def get_content(url, headers={}, decoded=True):
"""
"""
logging.debug('get_content: %s' % url)
req = request.Request(url, headers=headers)
if cookies:
cookies.add_cookie_header(req)
req.headers.update(req.unredirected_hdrs)
response = urlopen_with_retry(req)
data = response.read()
# Handle HTTP compression for gzip and deflate (zlib)
content_encoding = response.getheader('Content-Encoding')
if content_encoding == 'gzip':
data = ungzip(data)
elif content_encoding == 'deflate':
data = undeflate(data)
# Decode the response body
if decoded:
charset = match1(
response.getheader('Content-Type', ''), r'charset=([\w-]+)'
)
if charset is not None:
data = data.decode(charset, 'ignore')
else:
data = data.decode('utf-8', 'ignore')
return data
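A minimal usage sketch (URL and header are placeholders):

# gzip/deflate responses are transparently decompressed and the body is
# decoded using the charset declared in the Content-Type header.
html = get_content(
    'https://example.com/video/12345',
    headers={'User-Agent': 'Mozilla/5.0'},
    decoded=True,
)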
|