Spring Boot Setup And Demo

For this demo, we will create a Spring Boot application with the common dependencies - Web, JPA, Lombok, PostgreSQL, Actuator, etc.
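
If you are building the project with Maven, the dependency list would look roughly like the minimal sketch below (versions are assumed to be managed by the Spring Boot parent; spring-boot-starter-aop is listed explicitly because the routing aspect shown later needs AspectJ support, although spring-boot-starter-data-jpa usually brings it in transitively).

<!-- Minimal dependency sketch (assumed pom.xml) -->
<dependencies>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-web</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-data-jpa</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-actuator</artifactId>
    </dependency>
    <dependency>
        <groupId>org.springframework.boot</groupId>
        <artifactId>spring-boot-starter-aop</artifactId>
    </dependency>
    <dependency>
        <groupId>org.postgresql</groupId>
        <artifactId>postgresql</artifactId>
        <scope>runtime</scope>
    </dependency>
    <dependency>
        <groupId>org.projectlombok</groupId>
        <artifactId>lombok</artifactId>
        <optional>true</optional>
    </dependency>
</dependencies>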

How it works

  • The whole purpose of the master and replica databases is to distribute the load across databases.

  • Every write operation must go to the master database, and all read operations should be served by the replicas.

  • Since the master and replica databases share the same schema and are kept in sync through asynchronous replication, all of them eventually hold the same data.

  • Whenever a write operation happens on the master instance, the change is replicated to the replicas.

  • So we need to create connections to the master and all the replica databases at the same time. We will configure multiple data sources and pick the required one based on the type of transaction.

  • To decide which data source to use, we will rely on Spring's transaction support along with some custom annotations.

  • Whenever a transaction is marked as read-only with @Transactional(readOnly = true), the system will route it to a replica; without read-only it goes to the master.

  • To force certain APIs or methods to use the replica or the master data source, we will write custom annotations.

  • Since the whole operation is thread-based, we will use a custom DataSourceContextHolder to mark the current transaction as targeting a replica or the master.

Project Structure

Application Properties

Here we have configured the dataSource properties for the master and replica databases.

We have added the Hikari configuration separately because we will configure the data sources manually.

# Application Configuration
server:
  port: 8080

# Logging Configuration
logging:
  level:
    com.app: DEBUG
    org.springframework.jdbc: DEBUG
    org.springframework.transaction: DEBUG
    com.zaxxer.hikari: info
#  pattern:
#    console: "%d{yyyy-MM-dd HH:mm:ss} - %msg%n"
#    file: "%d{yyyy-MM-dd HH:mm:ss} [%thread] %-5level %logger{36} - %msg%n"
  file:
    name: logs/application.log

# Master Database Configuration
spring:
  datasource:
    master:
      url: jdbc:postgresql://192.168.1.17:6000/readreplicademo
      username: ${MASTER_DB_USER:postgres}
      password: ${MASTER_DB_PASSWORD:root}
      driver-class-name: org.postgresql.Driver
    # Replica Database Configurations
    replica1:
      url: jdbc:postgresql://192.168.1.17:6000/readreplicademo
      username: ${MASTER_DB_USER:postgres}
      password: ${MASTER_DB_PASSWORD:root}
      driver-class-name: org.postgresql.Driver

    replica2:
      url: jdbc:postgresql://192.168.1.17:6000/readreplicademo
      username: ${MASTER_DB_USER:postgres}
      password: ${MASTER_DB_PASSWORD:root}
      driver-class-name: org.postgresql.Driver

    replica3:
      url: jdbc:postgresql://192.168.1.17:6000/readreplicademo
      username: ${MASTER_DB_USER:postgres}
      password: ${MASTER_DB_PASSWORD:root}
      driver-class-name: org.postgresql.Driver

  # Transaction Configuration
  transaction:
    default-timeout: 30
    rollback-on-commit-failure: true

  # JPA Configuration (if using JPA)
  jpa:
    hibernate:
      ddl-auto: update
    show-sql: false
    properties:
      hibernate:
        dialect: org.hibernate.dialect.PostgreSQLDialect
        format_sql: true
        use_sql_comments: true
        jdbc:
          batch_size: 20
        connection:
          provider_disables_autocommit: true


  threads:
    virtual:
      enabled: true

  # Retry Configuration
#  retry:
#    enabled: true

# Management & Monitoring
management:
  endpoints:
    web:
      exposure:
        include: health,info,metrics,prometheus
  endpoint:
    health:
      show-details: always
    metrics:
      access: read_only
  health:
    db:
      enabled: true
  metrics:
    enable:
      all: true
#  metrics:
#    export:
#      prometheus:
#        enabled: true

# Custom Application Properties
app:
  database:
    read-timeout: 30
    write-timeout: 60
    health-check-interval: 30000
    connection-retry-attempts: 3
    connection-retry-delay: 2000

  async:
    core-pool-size: 10
    max-pool-size: 50
    queue-capacity: 100
    keep-alive-seconds: 60

  scheduling:
    pool-size: 5
    health-check-cron: "0 */1 * * * *"  # Every minute
    maintenance-cron: "0 0 2 * * *"     # Daily at 2 AM

  hikari:
    auto-commit: false
    connection-timeout: 20000
    idle-timeout: 300000
    max-lifetime: 1200000
    maximum-pool-size: 20
    minimum-idle: 5

#  circuit-breaker:
#    enabled: true
#    failure-threshold: 5
#    timeout: 10000
#    reset-timeout: 60000
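
The custom app.* block above is bound into a properties class that the data source configuration injects later. Below is a minimal sketch of that class, assuming only the hikari group is needed (the other groups - database, async, scheduling - would be bound the same way):

import lombok.Getter;
import lombok.Setter;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.stereotype.Component;

@Getter
@Setter
@Component
@ConfigurationProperties(prefix = "app")
public class ApplicationProperties {

    // only the hikari group used by DataSourceConfig is sketched here
    private Hikari hikari = new Hikari();

    @Getter
    @Setter
    public static class Hikari {
        private boolean autoCommit;
        private long connectionTimeout;
        private long idleTimeout;
        private long maxLifetime;
        private int maximumPoolSize;
        private int minimumIdle;
    }
}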

DataSourceConfig

We will read the properties file and create beans for the master and replica data sources.

We will use a RoutingDataSource that routes database connections between the master and the replicas.


@RequiredArgsConstructor
@Configuration(proxyBeanMethods = false)
@EnableTransactionManagement
@EnableJpaRepositories(
        basePackages = "com.app.readreplica.repositories",
        entityManagerFactoryRef = "entityManagerFactory",
        transactionManagerRef = "transactionManager"
)
public class DataSourceConfig {

    private final ApplicationProperties applicationProperties;
    
    // reading master datasource properties
    @Bean(name = "masterDataSourceProperties")
    @ConfigurationProperties(prefix = "spring.datasource.master")
    public DataSourceProperties masterDataSourceProperties() {
        return new DataSourceProperties();
    }

    // creating HikariDatasource for master 
    @Bean(name = "masterDataSource")
    public DataSource masterDataSource(@Qualifier("masterDataSourceProperties") DataSourceProperties properties) {
        var ds= properties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
        populateHikariConfig((HikariDataSource) ds);
        ds.setPoolName("MasterCP");
        return ds;
    }

    // reading replica datasource properties
    @Bean(name = "replica1DataSourceProperties")
    @ConfigurationProperties(prefix = "spring.datasource.replica1")
    public DataSourceProperties replica1DataSourceProperties() {
        return new DataSourceProperties();
    }

    // replica hikari datasource
    @Bean(name = "replica1DataSource")
    public DataSource replica1DataSource(@Qualifier("replica1DataSourceProperties") DataSourceProperties properties) {
        var ds= properties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
        populateHikariConfig((HikariDataSource) ds);
        ds.setPoolName("Replica1CP");
        return ds;
    }

    @Bean(name = "replica2DataSourceProperties")
    @ConfigurationProperties(prefix = "spring.datasource.replica2")
    public DataSourceProperties replica2DataSourceProperties() {
        return new DataSourceProperties();
    }

    @Bean(name = "replica2DataSource")
    public DataSource replica2DataSource(@Qualifier("replica2DataSourceProperties") DataSourceProperties properties) {
        var ds= properties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
        populateHikariConfig((HikariDataSource) ds);
        ds.setPoolName("Replica2");
        return ds;
    }

    @Bean(name = "replica3DataSourceProperties")
    @ConfigurationProperties(prefix = "spring.datasource.replica3")
    public DataSourceProperties replica3DataSourceProperties() {
        return new DataSourceProperties();
    }
    @Bean(name = "replica3DataSource")
    public DataSource replica3DataSource(@Qualifier("replica3DataSourceProperties") DataSourceProperties properties) {
        var ds= properties.initializeDataSourceBuilder().type(HikariDataSource.class).build();
        populateHikariConfig((HikariDataSource) ds);
        ds.setPoolName("MasterCP");
        return ds;
    }


    private void populateHikariConfig(HikariDataSource ds) {
        var hikari = applicationProperties.getHikari();
        ds.setAutoCommit(hikari.isAutoCommit());
        ds.setConnectionTimeout(hikari.getConnectionTimeout());
        ds.setIdleTimeout(hikari.getIdleTimeout());
        ds.setMaxLifetime(hikari.getMaxLifetime());
        ds.setMaximumPoolSize(hikari.getMaximumPoolSize());
        ds.setMinimumIdle(hikari.getMinimumIdle());
    }


    // bean holding all the replica datasources
    @Bean(name = "replicaDataSources")
    public List<DataSource> replicaDataSources(
            @Qualifier("replica1DataSource") DataSource replica1,
            @Qualifier("replica2DataSource") DataSource replica2,
            @Qualifier("replica3DataSource") DataSource replica3) {
        return List.of(replica1, replica2, replica3);
    }

    // Routing datasource bean that will pick the correct datasource based on the routing decision
    @Bean(name = "routingDataSource")
    public DataSource routingDataSource(@Qualifier("masterDataSource") DataSource masterDataSource,
            @Qualifier("replicaDataSources") List<DataSource> replicaDataSources) {

        ReadWriteRoutingDataSource routingDataSource = new ReadWriteRoutingDataSource();
        routingDataSource.setMasterDataSource(masterDataSource);
        routingDataSource.setReplicaDataSources(replicaDataSources);
        routingDataSource.afterPropertiesSet();

        return routingDataSource;
    }

    // now creating main datasource object , with instance of routing data source
    @Primary
    @Bean(name = "dataSource")
    public DataSource dataSource(@Qualifier("routingDataSource") DataSource routingDataSource) {
        // LazyConnectionDataSourceProxy ensures connection is obtained only when needed
        return new LazyConnectionDataSourceProxy(routingDataSource);
    }
}
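
Note that @EnableJpaRepositories above points at entityManagerFactory and transactionManager beans. A minimal sketch of how they could be declared in the same configuration class, wired to the routing dataSource bean (the entity package name is an assumption):

    @Bean(name = "entityManagerFactory")
    public LocalContainerEntityManagerFactoryBean entityManagerFactory(
            EntityManagerFactoryBuilder builder,
            @Qualifier("dataSource") DataSource dataSource) {
        return builder
                .dataSource(dataSource)
                .packages("com.app.readreplica.entities") // assumed entity package
                .persistenceUnit("default")
                .build();
    }

    @Bean(name = "transactionManager")
    public PlatformTransactionManager transactionManager(
            @Qualifier("entityManagerFactory") EntityManagerFactory entityManagerFactory) {
        return new JpaTransactionManager(entityManagerFactory);
    }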

Routing DataSource

This class is responsible for selecting the data source based on the transaction type of the query.

It holds the master and replica data sources and uses the DataSourceContextHolder class to determine the routing choice selected by the caller.

package com.app.readreplica.config.database;

import lombok.Getter;
import lombok.Setter;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
import org.springframework.transaction.support.TransactionSynchronizationManager;

import javax.sql.DataSource;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.atomic.AtomicInteger;

@Getter
@Setter
public class ReadWriteRoutingDataSource extends AbstractRoutingDataSource {
    
    private static final Logger logger = LoggerFactory.getLogger(ReadWriteRoutingDataSource.class);
    
    private DataSource masterDataSource;
    private List<DataSource> replicaDataSources;
    private final AtomicInteger replicaCounter = new AtomicInteger(0);
    private final ThreadLocal<String> forceDataSource = new ThreadLocal<>();


    @Override
    public void afterPropertiesSet() {
        Map<Object, Object> targetDataSources = new HashMap<>();
        
        // Add master datasource
        targetDataSources.put(DataSourceType.MASTER, masterDataSource);
        
        // Add replica data-sources
        for (int i = 0; i < replicaDataSources.size(); i++) {
            targetDataSources.put(DataSourceType.REPLICA.name() + "_" + i, replicaDataSources.get(i));
        }
        
        setTargetDataSources(targetDataSources);
        setDefaultTargetDataSource(masterDataSource);
        super.afterPropertiesSet();
    }
    
    // called whenever a connection is requested, to determine the datasource lookup key
    @Override
    protected Object determineCurrentLookupKey() {

        if(DataSourceContextHolder.isMasterRequired()){
            logger.debug("Using master datasource for write transaction");
            return DataSourceType.MASTER;
        }

        if(DataSourceContextHolder.isReplicaRequired() || TransactionSynchronizationManager.isCurrentTransactionReadOnly()){
            String replicaKey = selectReadReplica();
            logger.debug("Using read replica: {}", replicaKey);
            return replicaKey;
        }

        if(TransactionSynchronizationManager.isActualTransactionActive() ){
            logger.debug("Using master datasource for write transaction");
            return DataSourceType.MASTER;
        }

        // Default to master
        return DataSourceType.MASTER;
    }
    
    private String selectReadReplica() {
        if (replicaDataSources.isEmpty()) {
            logger.warn("No replica datasources available, falling back to master");
            return DataSourceType.MASTER.name();
        }
        
        // Round-robin load balancing with failover based on health check
        for (int i = 0; i < replicaDataSources.size(); i++) {
            int index = Math.floorMod(replicaCounter.getAndIncrement(), replicaDataSources.size()); // floorMod avoids a negative index if the counter overflows
            String replicaKey = DataSourceType.REPLICA.name() + "_" + index;

            // Check if replica is healthy
            if (isDataSourceHealthy(replicaDataSources.get(index))) {
                return replicaKey;
            }

            logger.warn("Replica {} is unhealthy, trying next replica", replicaKey);
        }

        // All replicas failed, fallback to master
        logger.error("All replica datasources are unhealthy, falling back to master");
        return DataSourceType.MASTER.name();


        // Random load balancing
//        int index = Math.abs(replicaCounter.getAndIncrement() % replicaDataSources.size());
//        Object key = index;
//        logger.debug("Routing to REPLICA index: {}, key: {}", index, DataSourceType.REPLICA.name() + "_" + key);
//        return DataSourceType.REPLICA.name() + "_" + index;

    }
    
    private boolean isDataSourceHealthy(DataSource dataSource) {
        try (Connection connection = dataSource.getConnection()) {
            return connection.isValid(1); // 1 second timeout
        } catch (SQLException e) {
            logger.error("Health check failed for replica datasource", e);
            return false;
        }
    }


}

DataSourceContextHolder

A helper that stores the current routing decision. ReadWriteRoutingDataSource uses this class to determine the current lookup key.

public enum DataSourceType {
    MASTER, REPLICA
}


/**
 * Uses a ThreadLocal to store the routing decision for the current thread.
 * This allows the AOP aspect to signal the routing data source to use a replica.
 */
public class DataSourceContextHolder {

    private static final ThreadLocal<Boolean> replicaRequestHolder = new ThreadLocal<>();
    private static final ThreadLocal<Boolean> masterRequestHolder = new ThreadLocal<>();

    public static void setReplicaRequired(boolean isReplicaRequired) {
        replicaRequestHolder.set(isReplicaRequired);
    }

    public static boolean isReplicaRequired() {
        return replicaRequestHolder.get() != null && replicaRequestHolder.get();
    }

    public static void clearReplica() {
        replicaRequestHolder.remove();
    }

    public static void setMasterRequired(boolean isMasterRequired) {
        masterRequestHolder.set(isMasterRequired);
    }

    public static boolean isMasterRequired() {
        return masterRequestHolder.get() != null && masterRequestHolder.get();
    }

    public static void clearMaster() {
        masterRequestHolder.remove();
    }
}

Annotations

@Target({ElementType.METHOD})
@Retention(RetentionPolicy.RUNTIME)
public @interface ForceMaster {
    /**
     * Force using master datasource even for read operations
     */
}


/**
 * Annotation to force a read-only transaction.
 *
 * @Author saurabh vaish
 * @Date 23-07-2025
 */
@Target({ElementType.METHOD, ElementType.TYPE})
@Retention(RetentionPolicy.RUNTIME)
public @interface ReadOnlyReplica {

}

Aspect to perform action based on these annotations

So whenever an API or method is marked with one of the above annotations, this aspect sets the corresponding routing choice in DataSourceContextHolder. When the actual data source decision happens, the updated context forces the routing to go to the master or a replica.

@Aspect
@Component
@Order(0)
public class ReadOnlyRouteAspect {

    @Before("@annotation(com.app.readreplica.config.database.annotations.ReadOnlyReplica)")
    public void setReadDataSourceType() {
        DataSourceContextHolder.setReplicaRequired(true);
    }

    @After("@annotation(com.app.readreplica.config.database.annotations.ReadOnlyReplica)")
    public void clearDataSourceType() {
        DataSourceContextHolder.clearReplica();
    }

    @AfterThrowing(pointcut = "@annotation(com.app.readreplica.config.database.annotations.ReadOnlyReplica)", throwing = "ex")
    public void clearAfterException(Throwable ex) {
        DataSourceContextHolder.clearReplica();
    }

    @Before("@annotation(com.app.readreplica.config.database.annotations.ForceMaster)")
    public void setForceMasterDataSourceType() {
        DataSourceContextHolder.setMasterRequired(true);
    }

    @After("@annotation(com.app.readreplica.config.database.annotations.ForceMaster)")
    public void clearForceMasterDataSourceType() {
        DataSourceContextHolder.clearMaster();
    }

    @AfterThrowing(pointcut = "@annotation(com.app.readreplica.config.database.annotations.ForceMaster)", throwing = "ex")
    public void clearForceMasterAfterException(Throwable ex) {
        DataSourceContextHolder.clearMaster();
    }


}

Example Service


    // routed to master
    @Transactional
    public Token createToken() {
        log.info("Starting token creation process");
        String tokenValue = generateRandomToken();
        Token token = new Token();
        token.setToken(tokenValue);
        log.info("Creating new token with value: {}", tokenValue);

        Token savedToken = tokenRepository.save(token);
        log.info("Successfully created token with id: {} and value: {}", savedToken.getId(), tokenValue);
        return savedToken;
    }
    
    // read from a replica; either one of the two annotations is enough here
    @ReadOnlyReplica
    @Transactional(readOnly = true)
    public List<Token> findAll() {
        log.info("Starting retrieval of all tokens");
        List<Token> tokens = tokenRepository.findAll();
//        Thread.sleep(1000);
        log.info("Successfully retrieved {} tokens", tokens.size());
        log.debug("Token count details: {}", tokens.size());
        return tokens;
    }

    // will force the read operation to go to the master even if it is read-only
    @ForceMaster
    @Transactional(readOnly = true)
    public Optional<Token> findById(Long id) {
        log.info("Starting search for token with id: {}", id);
        Optional<Token> result = tokenRepository.findById(id);
        if (result.isPresent()) {
            log.info("Successfully found token with id: {}", id);
            log.debug("Token details - id: {}, value: {}", result.get().getId(), result.get().getToken());
        } else {
            log.warn("No token found with id: {}", id);
        }
        return result;
    }

    

    // routed to master
    @Transactional
    public Token updateToken(Long id, String newTokenValue) {
        log.info("Starting token update for id: {} with new value: {}", id,
                 newTokenValue != null ? "provided" : "auto-generated");

        Optional<Token> tokenOpt = tokenRepository.findById(id);
        if (tokenOpt.isPresent()) {
            Token token = tokenOpt.get();
            String oldValue = token.getToken();
            String finalTokenValue = newTokenValue != null ? newTokenValue : generateRandomToken();
            token.setToken(finalTokenValue);

            log.info("Updating token id: {} - old value: {}, new value: {}", id, oldValue, finalTokenValue);
            Token updatedToken = tokenRepository.save(token);
            log.info("Successfully updated token with id: {}", id);
            return updatedToken;
        } else {
            log.error("Token not found with id: {} for update operation", id);
            throw new TokenNotFoundException(id);
        }
    }
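
To exercise the routing end-to-end, a small controller can call these service methods. The sketch below is hypothetical - the TokenController and TokenService names and the endpoint paths are assumed, not part of the code shown above. Watching which Hikari pool (MasterCP vs Replica1CP/Replica2CP/Replica3CP) serves each request in the logs confirms the routing.

import java.util.List;
import lombok.RequiredArgsConstructor;
import org.springframework.web.bind.annotation.*;

// hypothetical controller for illustration; names and paths are assumed
@RestController
@RequestMapping("/tokens")
@RequiredArgsConstructor
public class TokenController {

    private final TokenService tokenService; // assumed name of the service shown above

    @PostMapping
    public Token create() {
        return tokenService.createToken();          // write -> routed to master
    }

    @GetMapping
    public List<Token> findAll() {
        return tokenService.findAll();              // read-only -> routed to a replica
    }

    @GetMapping("/{id}")
    public Token findById(@PathVariable Long id) {
        return tokenService.findById(id)
                .orElseThrow();                     // @ForceMaster -> routed to master
    }
}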
