I'm using apscheduler
to call a task every 2 hours; in this task it reads data from a MySQL DB.
But after MySQL's default wait_timeout of 28800s
, it always raises django.db.utils.OperationalError: (2013, 'Lost connection to MySQL server during query').
According to the documentation on "MySQL server has gone away", I think it is a stale-connection issue in the worker process.
But I still can't solve this problem.
main.py
import sys, os
import django
import logging
import datetime
import argparse
# Project root: two levels above this file.
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Make the project root plus its src/ and data_model/ trees importable.
sys.path.append(BASE_DIR)
sys.path.append(os.path.join(BASE_DIR,'src'))
sys.path.append(os.path.join(BASE_DIR,'data_model'))

if __name__ == '__main__':
    # -d/--execute_dir: directory whose conf/ subdir holds django_settings.py
    # (defaults to the project root computed above).
    ap = argparse.ArgumentParser()
    ap.add_argument('-d', '--execute_dir', type=str,
                    help='exe_dir',
                    default=BASE_DIR)
    args = ap.parse_args()
    sys.path.append(os.path.join(args.execute_dir, "conf"))
    # Configure Django BEFORE importing anything that touches the ORM.
    os.environ.setdefault("DJANGO_SETTINGS_MODULE", "django_settings")
    django.setup()
    # Imported here, not at the top of the file, because both modules need
    # a configured Django environment at import time.
    from auto_management_v2 import auto_manage
    from baseApscheduler import baseScheduler

    scheduler = baseScheduler.scheduler
    # Run auto_manage every 2 hours, first run roughly 20 seconds from now.
    # NOTE(review): start_date is passed as a formatted string; APScheduler
    # also accepts a datetime directly — confirm the string form is intended.
    scheduler.add_job(
        func=auto_manage,
        trigger='interval',
        hours=2,
        start_date=(datetime.datetime.now() + datetime.timedelta(seconds=20)).strftime("%Y-%m-%d %H:%M:%S"),
        id='auto_manage',
        jobstore='default',
        replace_existing=True)
    scheduler.start()  # BlockingScheduler: this call blocks forever
baseApscheduler.py
import logging
from apscheduler.schedulers.blocking import BlockingScheduler
from apscheduler.jobstores.memory import MemoryJobStore
from apscheduler.executors.pool import ThreadPoolExecutor, ProcessPoolExecutor
from apscheduler.events import EVENT_JOB_ERROR, EVENT_JOB_EXECUTED
from django.conf import settings
from utils import sendEmail
# Send all root-logger output (INFO and above) to the log file configured
# in Django settings, appending across restarts.
logging.basicConfig(
    level=logging.INFO,
    format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    filename=settings.AUTO_MANAGEMENT_FILE_NAME,
    filemode='a')
def event_listener(event):
    """Handle APScheduler job events (registered for ERROR | EXECUTED).

    On failure, logs the error and emails an alert containing the scheduled
    run time, job id and exception; on success, logs a short confirmation.

    Args:
        event: APScheduler job-execution event; ``event.exception`` is set
            only when the job raised.
    """
    if event.exception:
        # Use the file logger configured above instead of print(): stdout is
        # usually lost when this runs as a long-lived background service.
        logging.error('task %s failed at %s: %s',
                      event.job_id, event.scheduled_run_time, event.exception)
        info = '''
        time:{},
        task_name: {},
        fail_reason: {}
        '''.format(str(event.scheduled_run_time), str(event.job_id), str(event.exception))
        sendEmail(message=info, subject='task failed')
    else:
        # Original concatenated job_id + "task successed!"; a lazy %-style
        # log line is safer (no TypeError on non-str ids) and typo-free.
        logging.info('task %s succeeded', event.job_id)
class BaseScheduler():
    """Builds the module-wide BlockingScheduler.

    Configuration: an in-memory job store ('default'), a 10-thread pool for
    ordinary jobs plus a 3-process pool ('processPool'), and the
    ``event_listener`` above wired to job error/executed events.
    """

    def __init__(self):
        executors = {
            'default': ThreadPoolExecutor(10),
            'processPool': ProcessPoolExecutor(3),
        }
        jobstores = {'default': MemoryJobStore()}

        self.executors = executors
        self.jobstores = jobstores
        self.scheduler = BlockingScheduler(jobstores=jobstores,
                                           executors=executors)
        # Route the scheduler's own messages through the logging module
        # configured above (private-attribute override, as in the original).
        self.scheduler._logger = logging
        self.scheduler.add_listener(event_listener,
                                    EVENT_JOB_ERROR | EVENT_JOB_EXECUTED)


# Module-level singleton imported by main.py.
baseScheduler = BaseScheduler()
auto_management_v2.py
import asyncio
import datetime
import json
import logging
import urllib

import django.db
import requests
from django.conf import settings
from django.db.utils import OperationalError
# NOTE(review): this runs only ONCE, when the module is first imported —
# not before each scheduled task — so it cannot refresh connections that go
# stale later (see the per-task close_all() in auto_manage below).
django.db.connections.close_all() # try to close old connection here
from data_model.models import SmbAmmFilter, SmbAmmRule, SmbAmmCampaignFbRule, SmbAmmRuleCampaign

info_logger = logging.getLogger('auto_manage_info')
class AutoManagementServiceV2(object):
    """Processes active rule/campaign plans in concurrent chunks.

    NOTE(review): ``_handle_campaign_data`` is awaited in ``run`` but not
    defined in this snippet — presumably omitted from the paste; confirm it
    exists on this class.
    """

    def __init__(self):
        # Lazy queryset; MySQL is actually hit when run() calls len()/slices.
        self.plan_lists = SmbAmmRuleCampaign.objects.filter(status=1)
        # Number of plans handled concurrently per batch.
        self.chunk_size = 50 #

    def run(self):
        """Drive all plans through _handle_campaign_data, chunk_size at a time."""
        # Fresh event loop per run (the scheduler may call this repeatedly).
        new_loop = asyncio.new_event_loop()
        # Generator yielding queryset slices of at most chunk_size plans.
        self.iter_plans = (
            self.plan_lists[i:i + self.chunk_size] for i in range(0, len(self.plan_lists), self.chunk_size)
        )
        asyncio.set_event_loop(new_loop)
        for chunk_plans in self.iter_plans:
            # One task per plan in the chunk; the whole chunk is awaited
            # before the next chunk starts.
            task_list = [
                asyncio.ensure_future(self._handle_campaign_data(campaign_plan)) for campaign_plan in chunk_plans
            ]
            # NOTE(review): new_loop is never closed — each run leaks a loop;
            # consider new_loop.close() in a finally block.
            new_loop.run_until_complete(asyncio.gather(*task_list))
def auto_manage():
    """Entry point scheduled by APScheduler every 2 hours.

    Django caches DB connections per thread; after MySQL's wait_timeout
    (default 28800s) the cached connection is stale and the next query
    raises OperationalError (2006/2013, 'Lost connection ...').  Closing
    all connections here — on every run, not just at import time — forces
    Django to open a fresh connection for this task.
    """
    django.db.connections.close_all()
    AutoManagementServiceV2().run()


if __name__ == '__main__':
    auto_manage()
As the code above shows, I thought I had closed the MySQL connection in my task and hoped Django would reconnect, but it keeps failing.
How can I correctly reconnect to the DB every time my task runs?
Any commentary is very welcome. Many thanks.
The problem was solved after adding django.db.connections.close_all()
inside my task auto_manage:
def auto_manage():
    """Scheduled task entry point — the fixed version.

    close_all() discards every cached (possibly stale) DB connection, so
    Django opens a fresh one on the next query; this avoids OperationalError
    2013 after MySQL's wait_timeout expires between 2-hour runs.
    """
    django.db.connections.close_all()
    AutoManagementServiceV2().run()