I have a market data Spring Boot application that runs every day and shuts itself down in the evening. After the app shuts down, I want to process the market data messages which were logged. In order to do that, I was looking for a way to roll over the log file at shutdown instead of at startup, so that my post-processing app has a consistent way to read the files.
Currently, I have created a custom triggering policy and am using the following logback config for market data related messages.
@NoAutoStart
public class CustomTriggerPolicy<E> extends SizeAndTimeBasedFNATP<E> {

    /**
     * Delegates the rollover decision to the parent size-and-time policy.
     * When no rollover is due, records the current period's archive name
     * (without the compression suffix) so that a later manual rollover
     * knows which file name to use.
     */
    @Override
    public boolean isTriggeringEvent(File activeFile, E event) {
        if (super.isTriggeringEvent(activeFile, event)) {
            return true;
        }
        elapsedPeriodsFileName = getCurrentPeriodsFileNameWithoutCompressionSuffix();
        return false;
    }
}
logback.xml
<configuration>
<!-- Echo logback's own internal status messages to the console for troubleshooting. -->
<statusListener class="ch.qos.logback.core.status.OnConsoleStatusListener"/>
<!-- Give background jobs (e.g. async compression) up to 60s to finish before the context stops. -->
<shutdownHook class="ch.qos.logback.core.hook.DelayingShutdownHook">
<delay>60000</delay>
</shutdownHook>
<appender name="MKT-DATA-FIX-LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/mkt-data-fix.log</file>
<!-- Truncate (do not append to) the active file on startup. -->
<append>false</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<Pattern>%m%n</Pattern>
</encoder>
<!-- Roll over daily, or sooner once the active file reaches maxFileSize. -->
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<fileNamePattern>${LOG_HOME}/archived/foo/mkt-data-fix.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="com.mkt.data.CustomTriggerPolicy">
<maxFileSize>100GB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
</appender>
</configuration>
SpringBootApplication.class (Main class)
/**
 * Spring lifecycle callback: forces a log rollover just before the
 * application context is destroyed, so the archived file is finalized
 * for the post-processing job.
 */
@PreDestroy
public void shutdown() {
manualLogRollover();
}
/**
 * Locates the market-data rolling file appenders and triggers an
 * immediate rollover on each. Failures are logged but never propagated,
 * so shutdown proceeds regardless.
 */
public void manualLogRollover() {
    try {
        AppenderUtils.rolloverAppenders(AppenderUtils.getLoggingAppenders());
    } catch (Exception e) {
        log.warn("Log rollover failed", e);
    }
}
AppenderUtils.java
/**
 * Static helpers for locating and rolling over the market-data rolling
 * file appenders in the active logback context.
 */
public final class AppenderUtils {

    private AppenderUtils() {
        // Utility class - no instances.
    }

    /**
     * Scans every logger in the current logback context and collects the
     * rolling file appenders named MKT-DATA-FIX-LOG or MKT-DATA-FIX-EVENT-LOG.
     *
     * @return the matching appenders; possibly empty, never null
     */
    public static Set<Appender<ILoggingEvent>> getLoggingAppenders() {
        LoggerContext context = (LoggerContext) LoggerFactory.getILoggerFactory();
        Set<Appender<ILoggingEvent>> fileLogAppenders = new HashSet<>();
        for (Logger logger : context.getLoggerList()) {
            for (Iterator<Appender<ILoggingEvent>> it = logger.iteratorForAppenders(); it.hasNext();) {
                Appender<ILoggingEvent> appender = it.next();
                // Only rolling file appenders for the two market-data logs qualify.
                if (appender instanceof RollingFileAppender
                        && (appender.getName().equals("MKT-DATA-FIX-LOG")
                            || appender.getName().equals("MKT-DATA-FIX-EVENT-LOG"))) {
                    fileLogAppenders.add(appender);
                }
            }
        }
        return fileLogAppenders;
    }

    /**
     * Triggers an immediate rollover on each rolling file appender in the set.
     * Non-rolling appenders are silently ignored.
     *
     * @param appenders appenders to roll over; must not be null
     */
    public static void rolloverAppenders(Set<Appender<ILoggingEvent>> appenders) {
        for (Appender<ILoggingEvent> appender : appenders) {
            if (appender instanceof RollingFileAppender) {
                ((RollingFileAppender<ILoggingEvent>) appender).rollover();
            }
        }
    }
}
So far I've done the following things, but at shutdown I still get this error:
18:16:58,872 |-ERROR in c.q.l.core.rolling.TimeBasedRollingPolicy@398887205 - Timeout while waiting for compression job to finish java.util.concurrent.TimeoutException
at java.util.concurrent.TimeoutException
at at java.util.concurrent.FutureTask.get(FutureTask.java:205)
at at ch.qos.logback.core.rolling.TimeBasedRollingPolicy.waitForAsynchronousJobToStop(TimeBasedRollingPolicy.java:137)
at at ch.qos.logback.core.rolling.TimeBasedRollingPolicy.stop(TimeBasedRollingPolicy.java:129)
at at ch.qos.logback.core.rolling.RollingFileAppender.stop(RollingFileAppender.java:152)
at at ch.qos.logback.core.spi.AppenderAttachableImpl.detachAndStopAllAppenders(AppenderAttachableImpl.java:107)
at at ch.qos.logback.classic.Logger.detachAndStopAllAppenders(Logger.java:206)
at at ch.qos.logback.classic.Logger.recursiveReset(Logger.java:331)
at at ch.qos.logback.classic.Logger.recursiveReset(Logger.java:338)
at at ch.qos.logback.classic.Logger.recursiveReset(Logger.java:338)
at at ch.qos.logback.classic.Logger.recursiveReset(Logger.java:338)
at at ch.qos.logback.classic.Logger.recursiveReset(Logger.java:338)
at at ch.qos.logback.classic.LoggerContext.reset(LoggerContext.java:223)
at at ch.qos.logback.classic.LoggerContext.stop(LoggerContext.java:348)
at at ch.qos.logback.core.hook.ShutdownHookBase.stop(ShutdownHookBase.java:39)
at at ch.qos.logback.core.hook.DelayingShutdownHook.run(DelayingShutdownHook.java:57)
at at java.lang.Thread.run(Thread.java:748)
I am out of options. Does anyone know how to achieve what I am looking for here?
I think I figured it out. I had to define my own triggering and rolling policy that rolled the appenders and compressed the files synchronously.
I don't need to do the rollover as a part of @PreDestroy in my main class.
@NoAutoStart
public class CustomTriggerPolicy<E> extends SizeAndTimeBasedFNATP<E> {

    /**
     * Lets the parent size-and-time policy decide whether to roll. If it
     * declines, remembers the name the current period would archive to
     * (minus the compression suffix) so a shutdown-time rollover can use it.
     */
    @Override
    public boolean isTriggeringEvent(File activeFile, E event) {
        final boolean shouldRoll = super.isTriggeringEvent(activeFile, event);
        if (!shouldRoll) {
            // Capture the archive name for the manual rollover path.
            elapsedPeriodsFileName = getCurrentPeriodsFileNameWithoutCompressionSuffix();
        }
        return shouldRoll;
    }
}
import ch.qos.logback.core.rolling.RolloverFailure;
import ch.qos.logback.core.rolling.TimeBasedRollingPolicy;
import ch.qos.logback.core.rolling.helper.*;
import java.io.BufferedWriter;
import java.io.File;
import java.io.FileWriter;
import java.io.IOException;
/**
 * Custom rolling policy where we trigger rollover at shutdown.
 *
 * <p>Unlike {@link TimeBasedRollingPolicy}, this policy compresses files
 * synchronously during {@link #rollover()}, and it registers its own JVM
 * shutdown hook so the archive is complete before the process exits.
 */
public class MktDataRollingPolicy<E> extends TimeBasedRollingPolicy<E> {

    private static final String LOGBACK_STATUS_FILE_NAME = "logback-status.log";

    private Compressor compressor;
    private RenameUtil renameUtil = new RenameUtil();
    private String basePath;
    private String logbackStatusFilePath;
    FileNamePattern zipEntryFileNamePattern;

    @Override
    public void start() {
        // Directory for the fallback status file; overridable via -Dlogback.statusLog=...
        basePath = System.getProperty("logback.statusLog", "/home/master/logs/mkt-data-log");
        logbackStatusFilePath = basePath + File.separator + LOGBACK_STATUS_FILE_NAME;
        super.start();
        // set the LR for our utility object
        renameUtil.setContext(this.context);
        compressor = new Compressor(compressionMode);
        compressor.setContext(context);
        if (compressionMode == CompressionMode.ZIP) {
            String zipEntryFileNamePatternStr = transformFileNamePattern2ZipEntry(fileNamePatternStr);
            zipEntryFileNamePattern = new FileNamePattern(zipEntryFileNamePatternStr, context);
        }
        // Roll (and compress) synchronously when the JVM exits so the archive is
        // finished before any post-processing job reads it. NOTE(review): the hook
        // is never removed; if this policy can be stopped and restarted within one
        // JVM, hooks will accumulate - confirm the lifecycle.
        Runtime.getRuntime().addShutdownHook(
                new Thread(new ShutdownHookRunnable(), "mkt-data-log-rollover"));
    }

    /**
     * Renames (and, when configured, compresses - synchronously) the active
     * file into the elapsed period's archive name. If no elapsed-period name
     * has been recorded, the rollover is skipped and the reason is appended
     * to a status file for post-mortem inspection.
     *
     * @throws RolloverFailure if the rename or compression fails
     */
    @Override
    public void rollover() throws RolloverFailure {
        String elapsedPeriodsFileName = getTimeBasedFileNamingAndTriggeringPolicy().getElapsedPeriodsFileName();
        if (elapsedPeriodsFileName != null) {
            if (compressionMode == CompressionMode.NONE) {
                if (getParentsRawFileProperty() != null) {
                    renameUtil.rename(getParentsRawFileProperty(), elapsedPeriodsFileName);
                } // else { nothing to do if CompressionMode == NONE and parentsRawFileProperty == null }
            } else {
                String elapsedPeriodStem = FileFilterUtil.afterLastSlash(elapsedPeriodsFileName);
                if (getParentsRawFileProperty() == null) {
                    compressor.compress(elapsedPeriodsFileName, elapsedPeriodsFileName, elapsedPeriodStem);
                } else {
                    // Skip missing or empty files: renaming a nonexistent file would
                    // only raise a RolloverFailure, and an empty archive is useless.
                    if (isLogFileMissingOrEmpty()) {
                        addInfo("Skipping compression for missing/empty file: " + getParentsRawFileProperty());
                        return;
                    }
                    renameRawAndCompress(elapsedPeriodsFileName, elapsedPeriodStem);
                }
            }
        } else {
            String msg = String.format("ElapsedPeriodsFileName is null. Skipping rollover for file: %s",
                    getParentsRawFileProperty());
            addWarn(msg);
            // Also persist the skip reason to a plain file so it survives the
            // logging context being torn down during shutdown.
            File statusFile = new File(logbackStatusFilePath);
            try (BufferedWriter bw = new BufferedWriter(new FileWriter(statusFile, true))) {
                bw.append(msg);
                bw.append("\n");
            } catch (IOException e) {
                // Report through logback's status system rather than stdout so the
                // failure appears alongside the context's other status messages.
                addError("Failed to write to status file: " + logbackStatusFilePath, e);
            }
        }
    }

    /** True when the raw active file is absent or has zero length. */
    private boolean isLogFileMissingOrEmpty() {
        File f = new File(getParentsRawFileProperty());
        return !f.exists() || f.length() == 0;
    }

    /** Strips the directory part of the pattern to form the ZIP entry name. */
    private String transformFileNamePattern2ZipEntry(String fileNamePatternStr) {
        String slashified = FileFilterUtil.slashify(fileNamePatternStr);
        return FileFilterUtil.afterLastSlash(slashified);
    }

    /**
     * Renames the raw file to a unique temporary name, then compresses that
     * temporary file into the final archive - all synchronously.
     */
    void renameRawAndCompress(String nameOfCompressedFile, String innerEntryName) throws RolloverFailure {
        String parentsRawFile = getParentsRawFileProperty();
        String tmpTarget = nameOfCompressedFile + System.nanoTime() + ".tmp";
        renameUtil.rename(parentsRawFile, tmpTarget);
        compressor.compress(tmpTarget, nameOfCompressedFile, innerEntryName);
    }

    /** Runs the final rollover when the JVM shuts down; never lets an exception escape. */
    class ShutdownHookRunnable implements Runnable {
        @Override
        public void run() {
            try {
                addInfo("Calling rollover inside shutdown hook");
                rollover();
            } catch (Exception e) {
                addError("Failed to rollover", e);
            }
        }
    }
}
logback.xml
<configuration>
<appender name="MKT-DATA-FIX-LOG" class="ch.qos.logback.core.rolling.RollingFileAppender">
<file>${LOG_HOME}/mkt-data-fix.log</file>
<!-- Truncate (do not append to) the active file on startup. -->
<append>false</append>
<encoder class="ch.qos.logback.classic.encoder.PatternLayoutEncoder">
<Pattern>%m%n</Pattern>
</encoder>
<!-- Roll over daily, or sooner once the active file reaches maxFileSize;
     the custom policy also rolls and compresses synchronously at shutdown. -->
<rollingPolicy class="com.myapp.logback.MktDataRollingPolicy">
<fileNamePattern>${LOG_HOME}/archived/foo/mkt-data-fix.%d{yyyy-MM-dd}.%i.gz</fileNamePattern>
<timeBasedFileNamingAndTriggeringPolicy class="com.mkt.data.CustomTriggerPolicy">
<maxFileSize>100GB</maxFileSize>
</timeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
</appender>
</configuration>