Rev 7070 | Blame | Compare with Previous | Last modification | View Log | RSS feed
package com.erggroup.buildtool.ripple;import java.sql.Array;import java.sql.CallableStatement;import java.sql.Clob;import java.sql.Connection;import java.sql.DriverManager;import java.sql.PreparedStatement;import java.sql.ResultSet;import java.sql.SQLException;import java.sql.Timestamp;import java.sql.Types;import java.util.ArrayList;import java.util.Arrays;import java.util.Calendar;import java.util.Date;import java.util.GregorianCalendar;import java.util.Iterator;import java.util.List;import java.util.ListIterator;import java.util.concurrent.locks.ReentrantLock;import oracle.sql.ArrayDescriptor;import org.slf4j.Logger;import org.slf4j.LoggerFactory;import com.erggroup.buildtool.ripple.RunLevel.BuildState;import com.erggroup.buildtool.utilities.MutableDate;/**Release Manager schema abstraction*/public class ReleaseManager implements Cloneable{/**Unit test hook.* Prevents Oracle interaction when false.* @attribute*/public boolean mUseDatabase = true;/**Debug Support.* Set to false if EnvVar GBE_BUILDTOOL_DEBUG exists* Value of GBE_BUILDTOOL_DEBUG is expected to be an email if greater than* one character.** When set to false will:* Prevents use of Oracle Mutex and potential lockout during debug sessions.* Prevent the initiation of an indefinite pause.* Force abt_usetestarchive, Add -testArchive to the assemble_dpkg command* @attribute*/private static boolean mUseMutex = true;/*** Reasons for building a package* The character code is inserted directly into the Release Manager Database**/public enum BuildReason {Ripple('R'),Test('T'),NewVersion('N'),Restore('P');private char reason;private BuildReason(char reason) { this.reason = reason; }@Overridepublic String toString() { return String.valueOf(reason); }public static BuildReason fromValue(String value) {if (value != null) {for (BuildReason vReason : values()) {if (vReason.reason == value.charAt(0) ) {return vReason;}}}return null;}}/*** The build result : Error, Complete ...* The character code is inserted directly into the Release Manager Database*/public enum BuildResult {Building('B'),BuildError('E'),Complete('C'),SystemError('S');private char state;private BuildResult(char state) { this.state = state; }@Overridepublic String toString() { return String.valueOf(state); }// public static BuildResult fromValue(String value) {// if (value != null) {// for (BuildResult vState : values()) {// if (vState.state == value.charAt(0) ) {// return vState;// }// }// }// return null;// }}/**package object of no consequence* @attribute*/public static final Package NULL_PACKAGE = new Package();/**Logger* @attribute*/private static final Logger mLogger = LoggerFactory.getLogger(ReleaseManager.class);/** Class to contain a lock and a connection as a single entity**/static class RmConnection {/*** Connection to the database*/public Connection mConnection = null;/*** Lock used to control access to the connection*/public final ReentrantLock mSession = new ReentrantLock();/*** Close the connection on disconnect*/public boolean mCloseOnDisconnect = false;public RmConnection ( boolean closeMe){mCloseOnDisconnect = closeMe;}/*** Close the connection - do not throw an error*/void closeConnection(){closeConnection(mConnection);}/*** Close the specified connection - do not throw an error.* To be used for general error handling* @param connection*/static void closeConnection( Connection connection ){try {if (connection != null)connection.close();}catch (SQLException e){mLogger.error("SQL Exception closing connection: {}", e.getMessage());}}}/**database connection for use when not planning a build* @attribute*/private static RmConnection mNonPlanningConnection = new RmConnection(false);/**database session handle for use when planning a build* @attribute*/private static RmConnection mPlanningConnection = new RmConnection(true);/**database session handle* note this handle is only ever set to mNonPlanningConnection or mPlanningConnection* @attribute*/private Connection mConnection = null;private boolean mIsConnected = false;/**thread synchronization governing database connection request queuing* this lock is used by master threads with a low priority planning requirement* use the fairness parameter to grant access to the longest waiting thread* @attribute*/private static final ReentrantLock mLowPriorityQueue = new ReentrantLock(true);/**collection of ReleaseConfig objects* @attribute*/public ReleaseConfigData mReleaseConfigCollection = new ReleaseConfigData();/**database connection string* @attribute*/public String mConnectionString = "";/**database username* @attribute*/private String mUsername = "";/**database password* @attribute*/private String mPassword = "";/**collection of RunLevel objects* @attribute*/public List<RunLevelData> mRunLevelCollection = new ArrayList<RunLevelData>();/**set in claimVersion, cleared in discardVersion* @attribute*/private String mPlannedPkgId = null;/**set in claimVersion, cleared in discardVersion* @attribute*/private String mPlannedPkgVersion = null;/**prevents inadvertently attempting a commit which releases record locks in between claimMutex and releaseMutex* @attribute*/private boolean mDoNotCommit = false;/*** Controls the data collection mode.* True : daemon Mode (default)* False: escrow Mode*/public boolean mDaemon = true;/*** Indication of the state of the RM mutex* Used only for reporting*/public String mMutexState = "";/*** The number of seconds to postpone a shut down of a daemon set* after the thread has started.** This will prevent a daemon from being automatically disabled as soon* as it is restarted** Set to four hours*/private static final long mPostponeShutdown = 4L * 60L * 60L;/**constructor* @param connectionString* @param username* @param password*/public ReleaseManager(final String connectionString, final String username, final String password){mLogger.debug("ReleaseManager {}", connectionString);mConnectionString = connectionString;mUsername = username;mPassword = password;String gbeBtDebug = System.getenv("GBE_BUILDTOOL_DEBUG");if ( gbeBtDebug != null ){mLogger.error("GBE_BUILDTOOL_DEBUG set - Use of database mutex supressed");setNoMutex();}}/*** Clone an instance of this class*/@Overridepublic Object clone() throws CloneNotSupportedException {// Clone myself - using Java's build in stuffReleaseManager clonedObj = (ReleaseManager) super.clone();// Fix up several members//clonedObj.mConnection = null;clonedObj.mReleaseConfigCollection = new ReleaseConfigData();clonedObj.mRunLevelCollection = new ArrayList<RunLevelData>();clonedObj.mPlannedPkgId = null;clonedObj.mDoNotCommit = false;clonedObj.mPlannedPkgVersion = null;return clonedObj;}/**constructor used when schema information is unknown eg location, username, password*/public ReleaseManager(){// inherit mConnectionString, mUsername, mPasswordmLogger.debug("ReleaseManager");}/*** Set the mode of operation to be either escrow mode or daemon mode** @param isDaemon True: Daemon Mode* False: Escrow Mode*/public void setDaemonMode(boolean isDaemon){mDaemon = isDaemon;mLogger.debug("DaemonMode:{}", mDaemon);}/*** Clear mUseMutex* Used only in test mode to prevent the use of a mutex*/public static void setNoMutex(){mUseMutex = false;}/*** Return the state of mUseMutex* True indicates that the system must use a Mutex to access data*/public static boolean getUseMutex(){return mUseMutex;}/** Returns the current time** Overridden in ReleaseManagerUtf*/public long currentTimeMillis(){return System.currentTimeMillis();}/** Returns the applications major version number* Used to stamp the generated XML files to ensure that producer and consumer match** Overridden in ReleaseManagerUtf*/public String getMajorVersionNumber(){return this.getClass().getPackage().getSpecificationVersion();}/**connect to oracle - in a non planning mode* Overridden in ReleaseManagerUtf*/public void connect() throws SQLException{mLogger.debug("connect");mNonPlanningConnection = connect( mNonPlanningConnection );}/**connect to oracle - to plan a build* @param priority - true: Connect with a high priority (last planning session resulted in a build)*/public void connectForPlanning( boolean priority ) throws SQLException{mLogger.debug("connectForPlanning");if ( !priority ){// limit only one thread with a low priority build requirement to connectif ( mLowPriorityQueue.isHeldByCurrentThread() ){// by design a thread must NOT connect multiple times// this is to ensure the lock is claimed only oncemLogger.error("connectForPlanning thread already has the lock");}else{mLogger.debug("connectForPlanning calling lock");mLowPriorityQueue.lock();mLogger.debug("connectForPlanning called lock");}}// threads with a high priority build requirement are not subject to the mLowPriorityQueuemPlanningConnection = connect( mPlanningConnection );}/**Connect to oracle - finally* @param session - Lock item to use* @param connection - Connection to use*/private RmConnection connect( RmConnection rmc ) throws SQLException{mLogger.debug("connect");try{if ( rmc.mSession.isHeldByCurrentThread() ){// by design a thread must NOT connect multiple times// this is to ensure the lock is claimed only oncemLogger.error("connect thread already has the lock");}else{mLogger.debug("connect calling lock");rmc.mSession.lock();mLogger.debug("connect called lock");}if ( !mUseDatabase ){mLogger.info("connect !mUseDatabase");}else{// DEVI 46868// loop indefinitely until a connection attempt succeeds// unless the failure is on the first attemptboolean problemConnecting;do{mLogger.debug("connect check connection");problemConnecting = false;try{if ( rmc.mConnection == null || ( rmc.mConnection != null && !rmc.mConnection.isValid(10) ) ){mLogger.warn("connect calling getConnection");rmc.mConnection = DriverManager.getConnection(mConnectionString, mUsername, mPassword);// when connection to the database is established, the connection, by default, is in auto-commit mode// to adhere to the design in the use of select for update, it is crucial to turn auto-commit off// this also improves performancermc.mConnection.setAutoCommit(false);}}catch(SQLException e){mLogger.warn("connect determined problem connecting");problemConnecting = true;try{// sleep 30 secsmLogger.warn("connect getConnection failed. sleep 30secs");Thread.sleep(30000);}catch (InterruptedException f){mLogger.warn("connect caught InterruptedException");Thread.currentThread().interrupt();}if ( rmc.mConnection == null ){// failed on first connection attempt - unlikely due to database loading - likely bad connection parametersthrow new SQLException();}}} while ( problemConnecting );mLogger.debug("connect checked connection");}}finally{mConnection = rmc.mConnection;mIsConnected = true;mLogger.debug("connect finally connection");}return rmc;}/**Disconnect from oracle Database* <br>Is overridden in ReleaseManagerUtf for test purposes*/public void disconnect(){mLogger.debug("disconnect");disconnect( mNonPlanningConnection );}/**Disconnect from oracle* @param priority - True: High priority planning thread*/public void disconnectForPlanning( boolean priority ){mLogger.debug("disconnectForPlanning");if ( !priority ){// allow another low priority thread to connecttry {mLowPriorityQueue.unlock();}catch( IllegalMonitorStateException e ) {mLogger.error("disconnectForPlanning. IllegalMonitorStateException exception");}}disconnect( mPlanningConnection );}/**Disconnect from oracle* Internal oracle disconnection method. Wrapped by publicly available disconnection methods* @param session. Session Lock*/private void disconnect( RmConnection connection ){mLogger.debug("disconnect");// The planning connection is flagged to close on disconnect// This is an attempt to prevent database deadlocks - which have been seen// under unknown conditions.// Done before the lock is releasedif (connection.mCloseOnDisconnect){mLogger.debug("disconnect close on disconnect");connection.closeConnection();}// by design, a thread may call disconnect multiple times// this is a technique used in finally blocks// it is to ensure the lock is released in all cases// only unlock if it is held by this thread// when unlock is called on a ReentrantLock held by this thread// the hold count is decremented// connect should only let the hold count be incremented to 1// when the hold count is 0 the lock is released// and the ReentrantLock is no longer held by this thread// only call unlock when the lock is held by this threadif ( connection.mSession.isHeldByCurrentThread() ){mLogger.debug("disconnect calling unlock");try {connection.mSession.unlock();}catch (IllegalMonitorStateException e) {mLogger.error("disconnect. IllegalMonitorStateException exception");}mLogger.debug("disconnect called unlock");}mIsConnected = false;}/** Common processing SQL Exceptions* If it is a connection error then a new SQLException will be thrown otherwise* its an error with the SQL itself and a normal Exception will be thrown.** @param e - Exception being thrown* @param instanceId - Method suffix to differentiate multiple methods of the same name** @throws SQLException* @throws Exception*/void handleSQLException(SQLException e, String instanceId ) throws SQLException, Exception{String callingMethodName = Thread.currentThread().getStackTrace()[2].getMethodName() + instanceId ;mLogger.error("{} sql exception:{}", callingMethodName, e.getMessage());if ( mConnection == null || !mConnection.isValid(10) ){mLogger.error( "{} database access error only", callingMethodName);RmConnection.closeConnection(mConnection);throw new SQLException(e);}else{mLogger.error( "{} show stopper", callingMethodName);throw new Exception(callingMethodName + " show stopper");}}/** Claim the Build System Planning Lock* Essentially locks the row in the BUILD_SERVICE_CONFIG table with a service of MUTEX* for the duration of the transaction this prevents other MasterThreads from* generating build files in parallel and hence prevents planned version numbering contention*/public void claimMutex() throws SQLException, Exception{mLogger.debug("claimMutex");mMutexState = "Claiming Mutex";if ( mUseDatabase && mUseMutex ){try{CallableStatement stmt = mConnection.prepareCall("select CONFIG from release_manager.BUILD_SERVICE_CONFIG WHERE SERVICE='MUTEX' FOR UPDATE");mLogger.error("claimMutex calling stmt.executeUpdate");stmt.executeUpdate();mLogger.error("claimMutex called stmt.executeUpdate");stmt.close();mDoNotCommit = true;mMutexState = "Holding Mutex";}catch ( SQLException e ){mMutexState = "Mutex Claim Exception";handleSQLException(e, "");}// about to start the planning process again, discard previousdiscardVersions();}}/** Release the Build System Planning Lock* Essentially unlocks the row in the BUILD_SERVICE_CONFIG table with a service of MUTEX*/public void releaseMutex() throws SQLException, Exception{mLogger.debug("releaseMutex");mMutexState = "Releasing Mutex";if ( mUseDatabase ){try{mDoNotCommit = false;mLogger.error("releaseMutex calling commit");commit();mLogger.error("releaseMutex called commit");mMutexState = "Released Mutex";}catch ( SQLException e ){mMutexState = "Mutex Release Exception";handleSQLException(e, "");}}}/**central commit protection*/private void commit() throws SQLException, Exception{mLogger.debug("commit");if ( mUseDatabase ){if ( mDoNotCommit ){mLogger.error("commit attempted commit with mDoNotCommit set, this is a programming error");}else{mConnection.commit();}}}/**Flag a Build System Pause in the database* This is aimed at stopping ALL daemons dead in the water** Used when handling an unsupported exception case in either the main or slave daemons* typically an SQLException other than a database connection related on** @param mRecoverable True: Recoverable pause wait until recovery is detected* <br>False: Unrecoverable pause. Wait for 20 minutes*/public void indefinitePause(boolean mRecoverable){mLogger.debug("indefinitePause");if ( mUseDatabase ){String sqlStr;if ( mRecoverable) {sqlStr = "begin PK_BUILDAPI.set_infinite_pause(); end;";}else {sqlStr = "begin PK_BUILDAPI.set_finite_pause(20); end;";}try{connect();CallableStatement stmt = mConnection.prepareCall( sqlStr );stmt.executeUpdate();stmt.close();commit();}catch( SQLException e ){// do not throw Exception// this is part of Exception handlingmLogger.error( "indefinitePause caught SQLException {}", e.getMessage() );RmConnection.closeConnection(mConnection);}catch( Exception e ){mLogger.error( "indefinitePause caught Exception {}", e.getMessage() );}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnecttry{disconnect();}catch( Exception e ){// do not throw Exception// this is part of Exception handlingmLogger.error( "indefinitePause2 caught Exception {}", e.getMessage() );RmConnection.closeConnection(mConnection);}}}}/**ensures a run_level_schedule row with a non null indefinite_pause column does not exist* this is aimed at resuming all daemons** Assumes connection to database has been established*/public void resumeIndefinitePause() throws SQLException, Exception{mLogger.debug("resume");if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall( "begin PK_BUILDAPI.SET_RESUME(); end;" );stmt.executeUpdate();stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}}}/*** Only used in daemon mode* Update the Database information to show the package being built** @param pkgId Identify the package (name) being built** @param pkgVersion* The version at which the package is being built** @param rtagId The release Id in which the package is being built** @exception SQLException* @exception Exception*/void claimVersion(int pkgId, String pkgVersion, int rtagId) throws SQLException, Exception{mLogger.debug("claimVersion {} {}", pkgId, pkgVersion);if ( mUseDatabase ){CallableStatement stmt1 = null;try{if (isRtagIdConfigured( rtagId )){stmt1 = mConnection.prepareCall("insert into release_manager.planned_versions (pkg_id, pkg_version, planned_time) values (?,?, sysdate)");stmt1.setInt(1, pkgId);stmt1.setString(2, pkgVersion);stmt1.executeUpdate();mPlannedPkgId = String.valueOf(pkgId);mPlannedPkgVersion = pkgVersion;}}catch ( SQLException e ){handleSQLException(e, "");}finally {if (stmt1 != null)stmt1.close();}}}/**Only used in daemon mode* <br>Delete planned package package information from the last planned build* <p>* Note: There is an aging process to cleanup entries that are left around due to* unforeseen errors.* <p>* Resets mPlannedPkgId and mPlannedPkgVersion to null** @exception SQLException* @exception Exception*/public void discardVersion() throws SQLException, Exception{mLogger.error("discardVersion");if ( mPlannedPkgId != null && mPlannedPkgVersion != null ){try{connect();CallableStatement stmt = mConnection.prepareCall("delete from release_manager.planned_versions" +" where pkg_id=" + mPlannedPkgId +" and pkg_version='" + mPlannedPkgVersion + "'");stmt.executeUpdate();stmt.close();commit();mPlannedPkgId = null;mPlannedPkgVersion = null;}catch ( SQLException e ){handleSQLException(e, "");}finally{disconnect();}}}/** Delete planned versions over 24 hours old (rounded to the nearest hour that is)* Only used in daemon mode*/private void discardVersions() throws SQLException, Exception{mLogger.debug("discardVersions");try{// housekeeping whilst the daemon has the mutex// trunc(sysdate, 'hh') returns the time now rounded to the nearest hour// trunc(sysdate, 'hh') - 1 returns the time 24 hours ago rounded to the nearest hour// this statement does not return any rows when planned_time is null, though this should never be the caseCallableStatement stmt = mConnection.prepareCall("delete from release_manager.planned_versions where planned_time < trunc(sysdate, 'hh') - 1");stmt.executeUpdate();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}/**Update the Database information to show that a package is being built* Only used in daemon mode** @param rconId - Identifies the Release Config table entry* @param pkgId - Identifies the name of the package being built* @param pkgPvId - Identifies the packageVersion the build is based upon*/public void setCurrentPackageBeingBuilt(int rconId, int pkgId, int pkgPvId) throws SQLException, Exception{mLogger.error("setCurrentPackageBeingBuilt {}", rconId);if ( mUseDatabase ){CallableStatement stmt = null;try{connect();stmt = mConnection.prepareCall("update release_manager.run_level rl"+" set current_pkg_id_being_built=?"+" ,current_pv_id=?" +" ,last_build=sysdate" +" where rl.rcon_id=?");stmt.setInt(1, pkgId);stmt.setInt(2, pkgPvId);stmt.setInt(3, rconId);stmt.executeUpdate();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectif (stmt != null)stmt.close();disconnect();}}}/**Update the Database information to show that no package is being built* Only used in daemon mode** @param rconId - Identifies the Release Config table entry*/public void clearCurrentPackageBeingBuilt(int rconId) throws SQLException, Exception{mLogger.error("clearCurrentPackageBeingBuilt {}", rconId);if ( mUseDatabase ){try{connect();CallableStatement stmt4 = mConnection.prepareCall("update release_manager.run_level" +" set current_pkg_id_being_built=NULL,"+" current_pv_id=NULL," +" current_build_files=NULL" +" where rcon_id=" + rconId);stmt4.executeUpdate();stmt4.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}}/*** Executes the AutoMakeRelease stored procedure with the passed parameters* Handles database connection/disconnection** @param mReporting** @return A publishing error* True: A publishing error that only affect the current package* False: No publishing error* @exception SQLException* @exception Exception A publishing error that affects the entire build system.* This will cause an 'indefinite pause'*/public boolean autoMakeRelease(ReportingData mReporting) throws SQLException, Exception{mReporting.errMsg = null;mLogger.debug("autoMakeRelease {}", mReporting.packageName);if ( mUseDatabase ){try{// Create an array of dependencies// Convert the string mReporting.packageDepends into an array// mReporting.packageDepends is of the form 'PackageName','PackageVersion';'PackageName2','PackageVersion2'String [] depArrayData = mReporting.packageDepends.split(";");ArrayDescriptor desc = ArrayDescriptor.createDescriptor("RELEASE_MANAGER" + "." + "RELMGR_VARCHAR2_TAB_T", mConnection);Array depArray = new oracle.sql.ARRAY(desc, mConnection, depArrayData);connect();CallableStatement stmt = mConnection.prepareCall( "begin ? := PK_RMAPI.AUTO_MAKE_VCSRELEASE2(?,?,?,?,?,?,?,?); end;" );stmt.registerOutParameter( 1, Types.INTEGER);stmt.setLong( 2, mReporting.rtagId );stmt.setString( 3, mReporting.packageName );stmt.setString( 4, mReporting.packageExtension );stmt.setString( 5, mReporting.packageVersion );stmt.setString( 6, mReporting.newVcsTag );stmt.setArray ( 7, depArray );stmt.setInt ( 8, mReporting.isRipple ? 1 : 0 );stmt.setString( 9, "buildadm" );stmt.executeUpdate();int result = stmt.getInt( 1 );//// Return values// >0 PVID of package// -1 Package not found in pending table// -2 Package already exists// -3 Not approved for auto build// -4 Package Migrated to SVN being built from CC tag// -5 Rippled Package: Source path changed// Sql Application Errors cause an SQLException// Rtagid is NULL// No Package Name// No Package Version// No Package VCS// Bad IsRipple value// No User Name// Malformed VCS Tag// Database missing VCS tag// Invalid UserName////// Report per-package errors directly// Exceptions are for errors that need to halt the entire system//if ( result <= 0 ){mLogger.error("autoMakeRelease PK_RMAPI.AUTO_MAKE_VCSRELEASE failed, returned {}", result);if ( result == -4 ) {mReporting.errMsg = "Package migrated to SVN being built from CC tag";} else if ( result == -3 ) {mReporting.errMsg = "Package not approved for autobuild";} else if ( result == -2 ) {// This is OK} else if ( result == -1 ) {mReporting.errMsg = "Package Version no longer pending";} else if ( result == -5 ) {mReporting.errMsg = "VCS Source path changed in ripple build";} else {// Don't know this error - so its fatalthrow new Exception("autoMakeRelease show stopper PK_RMAPI.AUTO_MAKE_VCSRELEASE failed, returned " + result);}}else{// Now that the package-version has been created in the database, we have the pv_id of the// new package. Use this in the reporting process.mReporting.packageVersionId = result;}stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}return (mReporting.errMsg != null);}/** Executes the insertPackageMetrics stored procedure with the passed parameters* Handles database connection/disconnection*/public void insertPackageMetrics(int mRtagId, String packageName,String packageExtension, String metrics) throws SQLException, Exception{mLogger.debug("insertPackageMetrics {}", packageName);if ( mUseDatabase ){try{connect();CallableStatement stmt = mConnection.prepareCall( "begin ? := PK_RMAPI.INSERT_PACKAGE_METRICS(?,?,?,?); end;" );stmt.registerOutParameter( 1, Types.INTEGER);stmt.setInt ( 2, mRtagId );stmt.setString( 3, packageName );stmt.setString( 4, packageExtension );stmt.setString( 5, metrics );stmt.executeUpdate();int result = stmt.getInt( 1 );if ( result != 0 ){// flag build failuremLogger.error("insertPackageMetrics show stopper PK_RMAPI.INSERT_PACKAGE_METRICS failed, returned {}", result);throw new Exception("insertPackageMetrics show stopper PK_RMAPI.INSERT_PACKAGE_METRICS failed, returned" + result);}stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}}/*** Add entry to the Build_Instance table* Returns the build instance number. This will be used to cross reference unit tests results** @throws throws SQLException, Exception**/public int createBuildInstance(int rtagId, int pvId, BuildReason reason) throws SQLException, Exception{int buildId = 0;mLogger.debug("createBuildInstance {}:{}", rtagId, pvId);if ( mUseDatabase ){try{connect();CallableStatement stmt = mConnection.prepareCall("begin ? := PK_RMAPI.new_build_instance(?,?,?); end;");stmt.registerOutParameter( 1, Types.INTEGER);stmt.setLong( 2,rtagId );stmt.setLong( 3,pvId );stmt.setString( 4,reason.toString() );stmt.executeUpdate();buildId = stmt.getInt( 1 );mLogger.warn("createBuildInstance: Build Instance ID: {}", buildId);stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}return buildId;}/*** Update the Build_Instance table.* <br>Record the state of the current build in the database** @param buildId - Build ID of build* @param pvId - PV_ID of the build* @param result - Outcome of the build** Returns the internal error code. <0 == error** @throws throws SQLException, Exception**/public int updateBuildInstance(int buildID, int pvId, BuildResult result) throws SQLException, Exception{int rv = 0;mLogger.debug("updateBuildInstance {}:{}:{}", buildID, pvId, result);if ( mUseDatabase ){try{connect();String sql = "begin ? := PK_RMAPI.update_build_instance(?,?,?); end;";CallableStatement stmt = mConnection.prepareCall(sql);stmt.registerOutParameter( 1, Types.INTEGER);stmt.setLong( 2,buildID );stmt.setLong( 3,pvId );stmt.setString( 4,result.toString() );stmt.executeUpdate();rv = stmt.getInt( 1 );mLogger.warn("updateBuildInstance: Result: {}", rv);stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}return rv;}/*** Insert Test Results into Release Manager* Manage connection and disconnection** @param buildId* @param btr* @throws Exception, SQLException*/public void insertTestResults(int buildId, BuildTestResults btr) throws Exception, SQLException{if (buildId <= 0){mLogger.warn("insertTestResults: Invalid build Id");return;}if ( !mUseDatabase || !btr.mResultsFound ){return;}mLogger.warn("insertTestResults: {}, Number:{}", buildId, btr.mTestResults.size() );try{connect();String sql = "BEGIN ? := PK_RMAPI.insert_test_run(?, ?, ?, ?, ?, ?, ?);end;";CallableStatement stmt = mConnection.prepareCall(sql);stmt.registerOutParameter( 1, Types.INTEGER);stmt.setLong(2, buildId);Clob myClob = mConnection.createClob();for (int ii = 0; ii < btr.mTestResults.size(); ii++){//System.out.println("Data: " +data.testResults.get(ii).toString());BuildTestResults.testResultData td = btr.mTestResults.get(ii);if (td.platform != null){stmt.setString(3, td.testName);stmt.setString(4, td.outcome);stmt.setString(5, td.platform);stmt.setString(6, td.type);if (td.duration == null) {stmt.setNull(7, Types.NUMERIC);} else {stmt.setLong (7, td.duration);}// Insert CLOB data, or a nullif (td.message == null){stmt.setNull(8,Types.CLOB);}else{myClob.truncate(0);myClob.setString(1, td.message);stmt.setClob(8, myClob);}stmt.execute();int rv = stmt.getInt( 1 );if (rv != 1){mLogger.error("insertTestResults show stopper. Insert error");throw new Exception("insertTestResults show stopper. Insert error");}}}stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/**executes the get_daemon_inst function with the passed parameters* @param di - Daemon Instruction Control Data Information** @return true if an instruction exists - for use in a while loop*/public boolean getDaemonInst(DaemonInstruction di ) throws SQLException, Exception{mLogger.debug("getDaemonInst {}", di.instruction);boolean retVal = false;if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall( "begin ? := PK_BUILDAPI.GET_DAEMON_INST(?,?,?,?,?,?,?); end;" );stmt.registerOutParameter(1, Types.INTEGER);stmt.registerOutParameter(3, Types.INTEGER);stmt.registerOutParameter(4, Types.INTEGER);stmt.registerOutParameter(5, Types.INTEGER);stmt.registerOutParameter(6, Types.INTEGER);stmt.registerOutParameter(7, Types.INTEGER);stmt.setInt(2, di.rtag_id );stmt.setInt( 3, di.instruction );stmt.setInt( 4, di.opCode );stmt.setInt(8, di.expired ? 1 : 0);stmt.execute();int result = stmt.getInt( 1 );if ( result == 1 ){retVal = true;di.instruction = stmt.getInt( 3 );di.pvId = stmt.getInt( 5 );di.userId = stmt.getInt( 6 );//// Convert userId into an email address//CallableStatement stmt1 = mConnection.prepareCall("select user_email from release_manager.users where user_id=" + di.userId);ResultSet rset1 = stmt1.executeQuery();while( rset1.next() ){di.userEmail = rset1.getString("user_email");if (rset1.wasNull()){di.userEmail = "";}}rset1.close();stmt1.close();}stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}return retVal;}/** Mark a Daemon Instruction as in-progress* Assumes that a database connection has been established* Assumes that the connection is within a Mutex session and the database commit will be done elsewhere** @param instruction - PVID of the daemon instruction to process*/public void markDaemonInstInProgress(final int instruction) throws SQLException, Exception{mLogger.debug("markDaemonInstInProgress {}", instruction);if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall( "call PK_BUILDAPI.MARK_DAEMON_INST_IN_PROGRESS(?)" );stmt.setInt( 1, instruction );stmt.executeUpdate();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}}/** Mark a Daemon Instruction as completed* Will establish (and release) a database connection if non is currently open* Will commit the change - unless commits have been disabled within a Mutex session** @param instruction - PVID of Daemon Instruction to process*/public void markDaemonInstCompleted(final int instruction) throws SQLException, Exception{mLogger.debug("markDaemonInstCompletedConnect {}", instruction);boolean connectionCreated = false;try{if (! mIsConnected){connect();connectionCreated = true;}if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall( "call PK_BUILDAPI.MARK_DAEMON_INST_COMPLETED(?)" );stmt.setInt( 1, instruction );stmt.executeUpdate();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}if ( ! mDoNotCommit ){commit();}}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectif (connectionCreated){disconnect();}}}/*** Determine the RTAG_ID associated with a given SBOM* Sets up mRtagId and mSbomId** Overridden in ReleaseManagerUtf** @param sbom_id An sbom_id to process** @return The SBOM's RTAG_ID, or zero if none was found* @exception SQLException*/public int queryRtagIdForSbom(int sbom_id) throws SQLException{int rtagId = 0;CallableStatement stmt = mConnection.prepareCall("select b.rtag_id_fk from deployment_manager.boms b where b.bom_id=" + sbom_id);ResultSet rset = stmt.executeQuery();while( rset.next() ){rtagId = rset.getInt("rtag_id_fk");}rset.close();stmt.close();return rtagId;}/**In daemon mode* Returns a concatenation of the proj_name and rtag_name** In escrow mode* Returns a concatenation of the proj_name, branch_name, bom_version and bom_lifecycle** Overridden in ReleaseManagerUtf*/public String queryBaselineName(int baseline) throws SQLException, Exception{mLogger.debug("queryBaselineName {}", mDaemon);StringBuilder retVal = new StringBuilder();String sql = "";if ( mDaemon ){sql = "select p.proj_name, rt.rtag_name" +" from release_manager.projects p, release_manager.release_tags rt" +" where rt.rtag_id=" + baseline + " and p.proj_id=rt.proj_id";}else{sql = "select dp.proj_name, br.branch_name, b.bom_version, b.bom_lifecycle" +" from deployment_manager.dm_projects dp, deployment_manager.branches br, deployment_manager.boms b" +" where b.bom_id=" + baseline + " and br.branch_id=b.branch_id and dp.proj_id=br.proj_id";}try{CallableStatement stmt = mConnection.prepareCall(sql);ResultSet rset = stmt.executeQuery();while( rset.next() ){String proj_name = rset.getString("proj_name");if ( proj_name != null ){retVal.append(proj_name);}if ( mDaemon ){String rtag_name = rset.getString("rtag_name");if ( rtag_name != null ){retVal.append(" > ").append(rtag_name);}}else{String branch_name = rset.getString("branch_name");if ( branch_name != null ){retVal.append(" > ").append(branch_name);}String bom_version = rset.getString("bom_version");if ( bom_version != null ){retVal.append(" ").append(bom_version);}String bom_lifecycle = rset.getString("bom_lifecycle");if ( bom_lifecycle != null ){retVal.append(".").append(bom_lifecycle);}}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}mLogger.info("queryBaselineName returned {}", retVal);return retVal.toString();}/**only used in daemon mode* select config from release_manager.build_service_config where service='MAIL SERVER';* returns the configured service** Overridden in ReleaseManagerUtf**/public String queryMailServer() throws SQLException, Exception{mLogger.debug("queryMailServer");String retVal = "";try{CallableStatement stmt = mConnection.prepareCall("select config from release_manager.build_service_config where service='MAIL SERVER'");ResultSet rset = stmt.executeQuery();while( rset.next() ){String config = rset.getString("config");if ( config != null ){retVal = config;break;}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}mLogger.info("queryMailServer returned {}", retVal);return retVal;}/**only used in daemon mode* returns the configured service** Overridden in ReleaseManagerUtf**/public String queryMailSender() throws SQLException, Exception{mLogger.debug("queryMailSender");String retVal = "";try{CallableStatement stmt = mConnection.prepareCall("select config from release_manager.build_service_config where service='BUILD FAILURE MAIL SENDER'");ResultSet rset = stmt.executeQuery();while( rset.next() ){String config = rset.getString("config");if ( config != null ){retVal = config;break;}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}mLogger.debug("queryMailSender returned {}", retVal);return retVal;}/**only used in daemon mode* returns the configured global email addresses** Overridden in ReleaseManagerUtf*/public String queryGlobalAddresses() throws SQLException, Exception{mLogger.debug("queryGlobalAddresses");String retVal = "";try{CallableStatement stmt = mConnection.prepareCall("select u.user_email from release_manager.build_service_config bsc, release_manager.users u " +"where bsc.service='GLOBAL EMAIL ADDRESS LIST' and u.full_name=bsc.config");ResultSet rset = stmt.executeQuery();while( rset.next() ){String email = rset.getString("user_email");if ( email != null ){retVal = email;break;}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}mLogger.debug("queryGlobalAddresses returned {}", retVal);return retVal;}/*** Determine a list of global, project wide and release email recipients* All emails will be sent to people on this list* @param baseline - an rtag_id* @throws SQLException* @returns String vector of email addresses*/public List<String> queryProjectEmail(int baseline) throws SQLException{//// Get Global and Project Wide email information//mLogger.debug("queryGlobalandProjectEmail");ArrayList<String> emailCollection = new ArrayList<String>();CallableStatement stmt0 = mConnection.prepareCall("select u.user_email " +"from release_manager.autobuild_failure af, " +"release_manager.members_group mg, " +"release_manager.users u, " +"release_manager.views v, " +"release_manager.release_tags rt " +"where rt.rtag_id=" + baseline + " " +"and v.view_name='PROJECT WIDE' " +"and af.proj_id=rt.proj_id " +"and af.view_id=v.view_id " +"and mg.group_email_id=af.group_email_id " +"and u.user_id=mg.user_id");ResultSet rset0 = stmt0.executeQuery();while( rset0.next() ){String email = rset0.getString("user_email");if ( email != null ){email = email.trim();emailCollection.addAll( Arrays.asList(email.split("\\s*[,\\s]+\\s*")));}}rset0.close();stmt0.close();// Fetch the Release Specific email address// It may be comma separatedCallableStatement stmt1 = mConnection.prepareCall("select owner_email from release_manager.release_tags rt where rt.rtag_id = " + baseline);ResultSet rset1 = stmt1.executeQuery();while( rset1.next() ){String email = rset1.getString("owner_email");if ( email != null ){email = email.trim();emailCollection.addAll( Arrays.asList(email.split("\\s*[,\\s]+\\s*")));}}rset1.close();stmt1.close();return emailCollection;}/** Clears the database entry of the build file* <br>The assumption is that the file has been collected from the database and stored for use** <p>sets CURRENT_BUILD_FILES to NULL for the rcon_id*/public void clearBuildFile(int rcon_id) throws SQLException, Exception{mLogger.debug("clearBuildFile");try{connect();CallableStatement stmt = mConnection.prepareCall("update release_manager.run_level" +" set current_build_files=NULL" +" where rcon_id=" + rcon_id);stmt.executeUpdate();stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/** Stores a buildfile in the database so that all daemons can collect it* updates the CURRENT_BUILD_FILES for the rtag_id* This will trigger the slave build cycle* Also sets the pkg_id and pvId of the package being built* This is only used for display purposes in Release manager** @param rtag_id - Target Release* @param buildFile - buildfile content to be saved*/public void publishBuildFile(int rtag_id, BuildFile buildFile) throws SQLException, Exception{mLogger.debug("publishBuildFile publishing a build file of length {}", buildFile.content.length());mLogger.debug("publishBuildFile publishing pkgId: {} pvid: {}", buildFile.mPkgId, buildFile.mPvId);try{connect();if ( isRtagIdConfigured( rtag_id ) ){PreparedStatement stmt = mConnection.prepareStatement("update release_manager.run_level " +" set current_build_files=?, " +" current_pkg_id_being_built=?,"+" current_pv_id=?," +" last_build=sysdate" +" WHERE rcon_id in ( " +" select rl.rcon_id from release_manager.release_config rc, " +" release_manager.run_level rl " +" where rc.rtag_id=? and rl.rcon_id=rc.rcon_id )" );stmt.setString(1, buildFile.content);if (buildFile.mPkgId == 0) {stmt.setNull(2, Types.NUMERIC);} else {stmt.setInt (2, buildFile.mPkgId);}if (buildFile.mPvId == 0) {stmt.setNull(3, Types.NUMERIC);} else {stmt.setInt (3, buildFile.mPvId);}stmt.setInt(4, rtag_id);stmt.executeUpdate();stmt.close();commit();}}catch ( SQLException e ){handleSQLException(e, "");}catch ( Exception e ){// this catch and rethrow is historical// problems were found using CallableStatement when updating a CLOB column with data > 4000 bytesmLogger.error("publishBuildFile caught Exception {}", e.getMessage());throw new Exception("publishBuildFile caught Exception " + e.getMessage());}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/**only used in daemon mode* <br>Query the Release Sequence Number for the specified Release, while ensuing that the machine* is still a part of the current build set** @param rtagId Release Identifier* @return <0 - No longer a part of the Build Set* <br> >=0 - Release Sequence Number**/public int queryReleaseSeqNum(int rtagId, int rcon_id, String machine_hostname) throws SQLException, Exception{mLogger.debug("queryReleaseSeqNum");int retVal = 0;if ( mUseDatabase ){try{connect();mLogger.info("queryReleaseSeqNum queryReleaseSeqNum");CallableStatement stmt = mConnection.prepareCall("SELECT NVL(rl.pause,0) AS pause, " +"(select seqnum from release_modified where rtag_id = "+ rtagId +") as seqnum" +" FROM run_level rl," +" release_config rc," +" BUILD_MACHINE_CONFIG bmc" +" WHERE rl.rcon_id = " + rcon_id +" AND rl.RCON_ID = rc.RCON_ID" +" AND rc.BMCON_ID IS NOT NULL" +" AND rc.BMCON_ID = bmc.BMCON_ID" +" AND rc.DAEMON_MODE = 'M'" +" AND UPPER(bmc.MACHINE_HOSTNAME) = UPPER('"+machine_hostname+"')");ResultSet rset = stmt.executeQuery();if( rset.next() ){int pause = rset.getInt("pause");if (pause > 1)retVal = -2;elseretVal = rset.getInt("seqnum");}else{retVal = -1;}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{disconnect();}}mLogger.warn("queryReleaseSeqNum returned {}", retVal);return retVal;}/*** Disable all the daemons for a specific release* Should only be done by the master* Used when master thread discovers that no build has occurred for a long time** @param rtag_id* @throws SQLException* @throws Exception*/private void disableDaemons(final int rtag_id) throws SQLException, Exception{mLogger.error("disableDaemons: {}", rtag_id );try{CallableStatement stmt1 = mConnection.prepareCall( "call PK_BUILDAPI.set_daemon_states(?,2)" );stmt1.setInt( 1, rtag_id );stmt1.executeUpdate();stmt1.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}}/**Checks the existence of any valid daemon configuration for the Release** A valid configuration record has a release_config entry that is linked to* a machine_configuration entry. ie: unlinked entries are not part of a configured set.** @param rtag_id - Release Tag to test* @return True if the Release still has a configuration entry*/private boolean isRtagIdConfigured(final int rtag_id) throws SQLException, Exception{mLogger.debug("isRtagIdConfigured");boolean retVal = false;try{// check if the rcon_id is still configuredCallableStatement stmt = mConnection.prepareCall("select rtag_id"+ " from release_manager.release_config"+ " where rtag_id=" + rtag_id+ " AND bmcon_id is not NULL");ResultSet rset = stmt.executeQuery();while( rset.next() ){retVal = true;}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}mLogger.info("isRtagIdConfigured returning {}", retVal);return retVal;}/**queries the RUN_LEVEL table for daemon to determine if the daemon has been paused* or disabled.** Used as a part of allowedToProceed()** Handles the following conditions* Daemon has be de-configured - Returns TRUE* Daemon has been paused - Returns FALSE* Daemon has been disabled - Returns TRUE* Daemon has been set to run - Returns TRUE** @param rcon_id Connection identifier* @returns False: Build agent has been commanded to PAUSE* <br>True: All other conditions** @exception SQLException* @exception Exception*/public boolean queryDirectedRunLevel(final int rcon_id) throws SQLException, Exception{mLogger.debug("queryDirectedRunLevel {}", rcon_id);boolean retVal = true;if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall("select NVL(rl.pause,0) as pause" +" from release_manager.run_level rl, release_manager.release_config rc "+" where rl.rcon_id=" + rcon_id +" and rl.RCON_ID = rc.RCON_ID" +" and rc.BMCON_ID is not NULL");ResultSet rset = stmt.executeQuery();int rsetSize = 0;while( rset.next() ){rsetSize++;//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//int pause = rset.getInt("pause");if ( pause == 1) {retVal = false;}}rset.close();stmt.close();if ( rsetSize > 1 ){mLogger.error("queryDirectedRunLevel rsetSize > 1");// show stopperthrow new Exception("queryDirectedRunLevel rsetSize > 1");}}catch ( SQLException e ){handleSQLException(e, "");}}mLogger.info("queryDirectedRunLevel returning {}", retVal);return retVal;}/*** queries the RELEASE_CONFIG and BUILD_MACHINE_CONFIG tables using the rcon_id primary key, rtag_id, machine_hostname, daemon_mode* <p>Return true if the query contains a result set containing one row* (indicating the rcon_id is still configured and its configuration is unchanged, aside from the gbe_buildfilter)* the gbe_buildfilter is queried prior to usage** <p>Used to determine if this daemon should be allowed to continue running* or if it should be terminated** <br>Overridden in ReleaseManagerUtf** @param rtag_id The rtag_id of the machine to examine* @param rcon_id The rcond_id of the machine to examine* @param machine_hostname The hostname of the machine to examine* @param daemon_mode The daemon mode of the machine to examine* @param threadStartTime The time that the thread started** @return True: The machine is a part of the current build set* False: The machine is not a part of the current build set* @exception SQLException* @exception Exception*/public boolean queryReleaseConfig(final int rtag_id, final int rcon_id,final String machine_hostname, final char daemon_mode, final long threadStartTime) throws SQLException, Exception{mLogger.debug("queryReleaseConfig 1");boolean retVal = false;try{String sql ="select rl.pause," +" rt.build_age," +" TRUNC((SYSDATE-rl.last_build) ) as last_build_days" +" from release_manager.release_config rc," +" release_manager.release_tags rt," +" release_manager.run_level rl," +" release_manager.build_machine_config bc" +" where rc.rtag_id=?" +" and rc.rcon_id=?" +" and rc.bmcon_id is not NULL" +" and rc.bmcon_id = bc.bmcon_id" +" and bc.machine_hostname=?" +" and rc.daemon_mode=?" +" and rl.rcon_id=rc.rcon_id" +" and rt.rtag_id=rc.rtag_id" +" and (rt.official = 'N' or rt.official='R' or rt.official='C')";CallableStatement stmt = mConnection.prepareCall( sql );stmt.setFetchSize(10);stmt.setInt(1, rtag_id);stmt.setInt(2, rcon_id);stmt.setString(3, machine_hostname);stmt.setString(4, Character.toString(daemon_mode));RmResultSet rset = new RmResultSet(stmt.executeQuery(),"queryReleaseConfig 1");int rsetSize = 0;while( rset.next() ){rsetSize++;//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//int pause = rset.getInt("pause",0);int build_age = rset.getInt("build_age", 0);int days_since_last_build = rset.getInt("last_build_days", 0);//// Determine if we should disable the daemons on this release because they have not been// used in a long while.//// Only Master makes the decision// If build_age is zero - then the daemon does not get aged out// If the days since last build > build_age, then we will shut down// Allow one days grace - will show up IN RM for a day// Allow the thread to run for x hours before shutting it down// Handles case where daemon has just been started/restarted//if ( daemon_mode == 'M' && build_age > 0 && days_since_last_build > (build_age + 1) ){long upTimeSecs = (System.currentTimeMillis() - threadStartTime)/1000;mLogger.error("queryReleaseConfig 1: {},{},{},{},{},{},{}",rtag_id, rcon_id, daemon_mode, pause, build_age, days_since_last_build, upTimeSecs );if ( upTimeSecs > mPostponeShutdown){disableDaemons(rtag_id);pause = 2;}else{mLogger.error("queryReleaseConfig 1: Shutdown Postponed" );}}mLogger.info("queryReleaseConfig 1: {},{},{},{},{},{}",rtag_id, rcon_id, daemon_mode, pause, build_age, days_since_last_build );if ( pause <= 1 ){retVal = true;}}rset.close();stmt.close();if ( rsetSize > 1 ){mLogger.error("queryReleaseConfig 1 rsetSize > 1");// show stopperthrow new Exception("queryReleaseConfig 1 rsetSize > 1");}}catch ( SQLException e ){handleSQLException(e, ":1");}mLogger.info("queryReleaseConfig 1 returning {}", retVal);return retVal;}/**removes all elements from the mReleaseConfigCollection* handles database connection and disconnection* queries the RELEASE_CONFIG and BUILD_MACHINE_CONFIG tables using the rtag_id** populates the mReleaseConfigCollection with the query result set* partially implements the sequence diagrams coordinate slave threads generate build files** Used by Master Thread to determine the build machines that will be a part* of the Slave-Sync process.** Only called when a database connection has been established*/public void queryReleaseConfig(final int rtag_id) throws SQLException, Exception{mLogger.debug("queryReleaseConfig 2");mReleaseConfigCollection.resetData();try{//connect();CallableStatement stmt = mConnection.prepareCall(" select rc.rcon_id,"+" rc.daemon_mode,"+" rl.pause,"+" bc.machine_hostname,"+" rc.gbe_buildfilter,"+" mt.gbe_value,"+" bm.bm_name" +" from release_manager.release_config rc, " +" release_manager.run_level rl, " +" release_manager.build_machine_config bc, " +" release_manager.gbe_machtype mt," +" release_manager.build_machines bm" +" where rc.rtag_id=?" +" and rl.rcon_id=rc.rcon_id " +" and rc.bmcon_id is not NULL" +" and rc.bmcon_id = bc.bmcon_id" +" and mt.gbe_id=bc.gbe_id" +" and mt.bm_id=bm.bm_id");stmt.setFetchSize(10);stmt.setInt(1, rtag_id);RmResultSet rset = new RmResultSet(stmt.executeQuery(),"queryReleaseConfig 2");while( rset.next() ){int rcon_id = rset.mustGetInt("rcon_id");char dm = 'S';String daemon_mode = rset.getString("daemon_mode","S");mLogger.info("queryReleaseConfig 2 daemon_mode '{}'", daemon_mode);if ( daemon_mode.compareTo("M") == 0 ){dm = 'M';}String machine_hostname = rset.mustGetString("machine_hostname");String gbe_buildfilter = rset.getString("gbe_buildfilter","");String gbe_machtype = rset.mustGetString("gbe_value");String gbe_machclass = rset.mustGetString("bm_name");//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//int pause = rset.getInt("pause",0);mLogger.info("queryReleaseConfig 2: " + rtag_id + ", " + rcon_id + ", "+ dm + ", " + pause +", " + machine_hostname + ", " + gbe_buildfilter + ", " + gbe_machtype + ", + " + gbe_machclass );//// Daemon Mode : Do not include build daemons that are disabled// Escrow Mode : Include all machines//if ( pause <= 1 || ! mDaemon ){ReleaseConfig releaseConfig = new ReleaseConfig( rtag_id, rcon_id, dm, machine_hostname, gbe_buildfilter, gbe_machtype, gbe_machclass);mReleaseConfigCollection.add(releaseConfig);}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, ":2");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnect//disconnect();}}/**removes all elements from the mReleaseConfigCollection* handles database connection and disconnection* queries the RELEASE_CONFIG and BUILD_MACHINE_CONFIG table using the machine_hostname* populates the mReleaseConfigCollection with the query result set* partially implements the sequence diagram spawn thread** Used by the BuildDaemon thread to determine daemons to start and stop*/public void queryReleaseConfig(final String hostname) throws SQLException, Exception{mLogger.debug("queryReleaseConfig 3 {}", hostname);mReleaseConfigCollection.resetData();try{connect();CallableStatement stmt = mConnection.prepareCall("select rc.rtag_id,"+" rc.rcon_id,"+" rc.daemon_mode,"+" rl.pause,"+" bc.machine_hostname,"+" rc.gbe_buildfilter,"+" mt.gbe_value,"+" bm.bm_name" +" from release_manager.release_config rc," +" release_manager.release_tags rt," +" release_manager.run_level rl," +" release_manager.build_machine_config bc," +" release_manager.gbe_machtype mt," +" release_manager.build_machines bm " +" where bc.machine_hostname=?" +" and rt.rtag_id=rc.rtag_id" +" and rl.rcon_id=rc.rcon_id" +" and rc.bmcon_id is not NULL" +" and bc.bmcon_id=rc.bmcon_id" +" and mt.gbe_id=bc.gbe_id" +" and mt.bm_id=bm.bm_id" +" and (rt.official = 'N' or rt.official='R' or rt.official='C')");stmt.setFetchSize(20);stmt.setString(1, hostname);RmResultSet rset = new RmResultSet(stmt.executeQuery(),"queryReleaseConfig 3");while( rset.next() ){int rtag_id = rset.mustGetKeyInt("rtag_id");int rcon_id = rset.mustGetInt("rcon_id");char dm = 'S';String daemon_mode = rset.getString("daemon_mode", "S");mLogger.info("queryReleaseConfig 3 daemon_mode '{}'", daemon_mode);if ( daemon_mode.compareTo("M") == 0 ){dm = 'M';}String machine_hostname = rset.mustGetString("machine_hostname");String gbe_buildfilter = rset.getString("gbe_buildfilter",null);String gbe_machtype = rset.mustGetString("gbe_value");String gbe_machclass = rset.mustGetString("bm_name");//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//int pause = rset.getInt("pause",0);mLogger.info("queryReleaseConfig 3: " + rtag_id + ", " + rcon_id + ", "+ dm + ", " + pause +", " + machine_hostname + ", " + gbe_buildfilter + ", " + gbe_machtype + ", + " + gbe_machclass );//// Do not include build daemons that are disabled// Only those that are running or paused//if ( pause <= 1 ){ReleaseConfig releaseConfig = new ReleaseConfig( rtag_id, rcon_id, dm, machine_hostname, gbe_buildfilter, gbe_machtype, gbe_machclass);mReleaseConfigCollection.add(releaseConfig);}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, ":3");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/** Check that the build set has exactly one master** Ignores entries that are disabled* Ignore entries that have been 'unlinked' - they have no active machine configuration** @param rtag_id - Release Tag** Used by the Master and Slave Daemons* Overridden in ReleaseManagerUtf*/public int queryMasterCount(final int rtag_id) throws SQLException, Exception{mLogger.debug("queryMasterCount rtag_id {}", rtag_id);int masterCount = 0;try{connect();CallableStatement stmt = mConnection.prepareCall("SELECT COUNT (rc.daemon_mode) as masterCount" +" FROM release_manager.release_config rc, release_manager.run_level rl" +" WHERE rc.rtag_id =" + rtag_id +" AND rl.RCON_ID = rc.RCON_ID" +" AND rc.bmcon_id is not NULL" +" AND NVL(rl.PAUSE,0) != 2" +" GROUP BY rc.daemon_mode" +" HAVING rc.daemon_mode = 'M'" );ResultSet rset = stmt.executeQuery();if ( rset.next() ){masterCount = rset.getInt("masterCount");}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}return masterCount;}/*** Determine an array of machines for which there are active build requests* Used to notify slave machines of a pending build request*** @param mHostname - Identifies the build machine* @param rconIdList - Comma separated list of RCON_IDs that we are interested in* @return An array of Integers, each being the rcon_id of a machine that has an active build* @throws Exception*/public ArrayList<Integer> queryActivatedBuilds(String mHostname, String rconIdList) throws Exception {ArrayList<Integer> rv = new ArrayList<Integer>();try{connect();PreparedStatement stmt = mConnection.prepareStatement("SELECT rl.rcon_id, rc.RTAG_ID" +" FROM run_level rl, release_config rc, release_tags rt" +" WHERE rt.rtag_id = rc.RTAG_ID" +" AND UPPER(rc.daemon_hostname) = UPPER('"+mHostname+"')" +" AND rc.DAEMON_MODE != 'M'" +" AND rc.RCON_ID = rl.rcon_id" +" AND CURRENT_BUILD_FILES IS NOT NULL" +" AND rl.pause is null" +" AND rl.rcon_id in ("+rconIdList+")");ResultSet rset = stmt.executeQuery();while (rset.next()) {Integer ii = rset.getInt("rcon_id");rv.add(ii);}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}return rv;}/*** Extract all package-version information for released packages* Used in both Escrow and non-Escrow modes** @param rippleEngine - Instance to use* @param packageCollection - Package Collection structure to fill. Expected to be cleared* @param baseline - RtagId of the release being processed, BomId of an Escrow being processed** Overridden in ReleaseManagerUtf**/protected void queryPackageVersions(RippleEngine rippleEngine, ArrayList<Package> packageCollection, int baseline) throws SQLException, Exception{Phase phase = new Phase("rmData");mLogger.debug("queryPackageVersions {}", mDaemon);try{if (mDaemon){// Get package information on ALL released packages within the release of interest//mLogger.debug("queryPackageVersions: stmt8");phase.setPhase("getAllPkgs1");PreparedStatement stmt8 = mConnection.prepareStatement("SELECT rc.pv_id," +" p.pkg_id," +" p.pkg_name," +" pv.pkg_version," +" pv.v_ext," +" pv.ripple_field," +" pv.major_limit," +" pv.minor_limit," +" pv.patch_limit," +" pv.build_number_limit," +" pv.build_type," +" rc.sdktag_id," +" rc.ripple_stop," +" peg.pv_id as pegged," +" release_manager.PK_RMAPI.return_vcs_tag(rc.pv_id) AS vcsTag," +" pv.build_time" +" FROM release_manager.release_content rc," +" release_manager.package_versions pv," +" release_manager.packages p," +" release_manager.pegged_versions peg" +" WHERE rc.rtag_id= " + baseline +" AND pv.pv_id = rc.pv_id" +" AND p.pkg_id = pv.pkg_id" +" AND peg.rtag_id(+) = rc.rtag_id" +" AND peg.pv_id(+) = rc.pv_id" +" ORDER BY rc.pv_id");mLogger.debug("queryPackageVersions: stmt8 Prepared");stmt8.setFetchSize(1000);RmResultSet rset8 = new RmResultSet(stmt8.executeQuery(), "queryPackageVersions rset8");mLogger.debug("queryPackageVersions: stmt8 Query Done");while( rset8.next() ){int pv_id = rset8.mustGetKeyInt("pv_id");int pkg_id = rset8.mustGetInt("pkg_id");String pkg_name = rset8.mustGetString("pkg_name");String pkg_version = rset8.mustGetString("pkg_version");String v_ext = rset8.getString("v_ext", "");String ripple_field = rset8.getString("ripple_field", "b");int major_limit = rset8.getInt("major_limit",0);int minor_limit = rset8.getInt("minor_limit", 0);int patch_limit = rset8.getInt("patch_limit",0);int build_number_limit = rset8.getInt("build_number_limit", 0);String vcs_tag = rset8.getString("vcsTag","");int isAnSdk = rset8.getInt("sdktag_id",0);int isPegged = rset8.getInt("pegged",0);String buildType = rset8.getString("build_type", null);boolean isBuildable = ( buildType != null ) && (buildType.equals("A") || buildType.equals("Y") );int buildTime = rset8.getInt("build_time", 60);String rippleStopData = rset8.getString("ripple_stop","n");char rippleStop = rippleStopData.charAt(0);Package p = new Package(pkg_id, pv_id, pkg_name, pkg_version, v_ext, pkg_name + v_ext, vcs_tag, ripple_field.charAt(0));p.mMajorLimit = major_limit;p.mMinorLimit = minor_limit;p.mPatchLimit = patch_limit;p.mBuildLimit = build_number_limit;p.mIsSdk = isAnSdk > 0;p.mIsPegged = isPegged > 0;p.mIsBuildable = isBuildable;p.mRippleStop = rippleStop;p.mBuildTime = buildTime;// If this package is to be replaced by a planned package then// insert some of the current packages attributes// Otherwise, add the the package to the package collection////TODO Sort out this bit of logicPackage plannedPackage = findPackage(p.mAlias, packageCollection);if ( plannedPackage == NULL_PACKAGE ){mLogger.info("queryPackageVersions rset8 no planned package {}", pv_id);packageCollection.add(p);}else{// Copy these flags from the package that will be replaced// Should not be able to replace a package provided by an SDKplannedPackage.mIsSdk = p.mIsSdk;// A pegged package can be replaced with a directly built package// plannedPackage.mIsPegged = p.mIsPegged;}}mLogger.debug("queryPackageVersions: stmt8 processing complete");rset8.close();stmt8.close();// get released package dependency info//mLogger.debug("queryPackageVersions: stmt9");phase.setPhase("getAllPkgs2");PreparedStatement stmt9 = mConnection.prepareStatement("select rc.pv_id as pv_id, dpv.pv_id as dpv_id, p.pkg_name, dpv.v_ext" +" from release_manager.release_content rc," +" release_manager.package_versions pv," +" release_manager.package_dependencies pd,"+" release_manager.package_versions dpv,"+" release_manager.packages p" +" where rc.rtag_id=" + baseline +" and pv.pv_id = rc.pv_id" +" and pd.pv_id=pv.pv_id" +" and dpv.pv_id=pd.dpv_id" +" and p.pkg_id=dpv.pkg_id" +" order by rc.pv_id");mLogger.debug("queryPackageVersions: stmt9 prepared");stmt9.setFetchSize(1000);RmResultSet rset9 = new RmResultSet(stmt9.executeQuery(), "queryPackageVersions rset9");mLogger.debug("queryPackageVersions: stmt9 query done");while( rset9.next() ){int pv_id = rset9.mustGetKeyInt("pv_id");int dpv_id = rset9.mustGetInt("dpv_id");String pkg_name = rset9.mustGetString("pkg_name");String v_ext = rset9.getString("v_ext","");Package p = findPackage(pv_id, packageCollection);if ( p != NULL_PACKAGE ){p.mDependencyCollection.add(pkg_name + v_ext);p.mDependencyIDCollection.add(dpv_id);}}mLogger.debug("queryPackageVersions: stmt9 processing complete");rset9.close();stmt9.close();// get released package build infomLogger.debug("queryPackageVersions: stmt10");phase.setPhase("getAllPkgs3");PreparedStatement stmt10 = mConnection.prepareStatement("select rc.pv_id, bm.bm_name, bsa.bsa_name " +"from release_manager.release_content rc," +" release_manager.package_versions pv," +" release_manager.package_build_info pbi," +" release_manager.build_machines bm," +" release_manager.build_standards_addendum bsa " +"where rc.rtag_id=?" +" and pv.pv_id = rc.pv_id" +" and pbi.pv_id=pv.pv_id" +" and bm.bm_id=pbi.bm_id" +" and bsa.bsa_id=pbi.bsa_id " +"order by rc.pv_id");stmt10.setFetchSize(1000);stmt10.setInt(1, baseline);RmResultSet rset10 = new RmResultSet(stmt10.executeQuery(), "queryPackageVersions rset10");while( rset10.next() ){int pv_id = rset10.mustGetKeyInt("pv_id");String bm_name = rset10.mustGetString("bm_name");String bsa_name = rset10.mustGetString("bsa_name");Package p = findPackage(pv_id, packageCollection);if ( p != NULL_PACKAGE ){BuildStandard bs = new BuildStandard(rippleEngine, bm_name, bsa_name);if ( bs.supportedBuildStandard() ){p.mBuildStandardCollection.add(bs);}}}rset10.close();stmt10.close();// get released package unit test infomLogger.debug("queryPackageVersions: stmt11");phase.setPhase("getAllPkgs4");PreparedStatement stmt11 = mConnection.prepareStatement("select rc.pv_id, tt.test_type_name " +"from release_manager.release_content rc, release_manager.package_versions pv, release_manager.unit_tests ut, release_manager.test_types tt " +"where rc.rtag_id=" + baseline +" and pv.pv_id = rc.pv_id and ut.pv_id=pv.pv_id and tt.test_type_id=ut.test_types_fk " +"order by rc.pv_id");stmt11.setFetchSize(1000);RmResultSet rset11 = new RmResultSet(stmt11.executeQuery(), "queryPackageVersions rset11");while( rset11.next() ){int pv_id = rset11.mustGetKeyInt("pv_id");String test_type_name = rset11.mustGetString("test_type_name");Package p = findPackage(pv_id, packageCollection);if ( p != NULL_PACKAGE ){if ( test_type_name.compareTo("Autobuild UTF") == 0 ){p.mHasAutomatedUnitTests = true;}}}rset11.close();stmt11.close();// get released package build failure info...// view basedmLogger.debug("queryPackageVersions: stmt12");phase.setPhase("getAllPkgs5");PreparedStatement stmt12 = mConnection.prepareStatement("SELECT rc.pv_id," +" u.user_email" +" FROM release_manager.release_content rc," +" release_manager.release_tags rt," +" release_manager.package_versions pv," +" release_manager.autobuild_failure af," +" release_manager.members_group mg," +" release_manager.users u" +" WHERE rc.rtag_id = " + baseline +" AND rt.rtag_id = rc.rtag_id" +" AND pv.pv_id = rc.pv_id" +" AND af.view_id = rc.base_view_id" +" AND mg.group_email_id= af.group_email_id" +" AND u.user_id = mg.user_id" +" AND af.proj_id = rt.proj_id" +" ORDER BY rc.pv_id");stmt12.setFetchSize(1000);RmResultSet rset12 = new RmResultSet(stmt12.executeQuery(), "queryPackageVersions rset12");while( rset12.next() ){int pv_id = rset12.mustGetKeyInt("pv_id");String user_email = rset12.getString("user_email",null);Package p = findPackage(pv_id, packageCollection);if ( p != NULL_PACKAGE ){p.addEmail(user_email);}}rset12.close();stmt12.close();// get released advisory ripple infomLogger.debug("queryPackageVersions: stmt14");phase.setPhase("getAllPkgs6");PreparedStatement stmt14 = mConnection.prepareStatement("select rc.pv_id " +"from release_manager.release_content rc, release_manager.package_versions pv, release_manager.advisory_ripple ar " +"where rc.rtag_id=" + baseline +" and pv.pv_id = rc.pv_id and ar.rtag_id=rc.rtag_id and ar.pv_id=rc.pv_id " +"order by rc.pv_id");stmt14.setFetchSize(1000);RmResultSet rset14 = new RmResultSet(stmt14.executeQuery(), "queryPackageVersions rset14");while( rset14.next() ){int pv_id = rset14.mustGetInt("pv_id");Package p = findPackage(pv_id, packageCollection);if ( p != NULL_PACKAGE ){p.mAdvisoryRipple = true;}}rset14.close();stmt14.close();}else{// Escrow Mode// get released product infomLogger.debug("queryPackageVersions: stmt18");PreparedStatement stmt = mConnection.prepareStatement("select oc.prod_id, p.pkg_name, pv.pkg_version, pv.v_ext," +"release_manager.PK_RMAPI.return_vcs_tag(pv.pv_id) AS vcsTag" +" from deployment_manager.bom_contents bc," +"deployment_manager.operating_systems os," +"deployment_manager.os_contents oc," +"release_manager.package_versions pv," +"release_manager.packages p" +" where bc.bom_id=" + baseline +" and os.node_id=bc.node_id" +" and oc.os_id=os.os_id" +" and pv.pv_id=oc.prod_id" +" and p.pkg_id=pv.pkg_id" +" order by oc.prod_id");stmt.setFetchSize(1000);RmResultSet rset = new RmResultSet(stmt.executeQuery(), "queryPackageVersions rset");while( rset.next() ){int pv_id = rset.mustGetKeyInt("prod_id");String pkg_name = rset.mustGetString("pkg_name");String pkg_version = rset.mustGetString("pkg_version");String v_ext = rset.getString("v_ext", "");String vcs_tag = rset.getString("vcsTag","");Package p = findPackage(pv_id, packageCollection);if ( p == NULL_PACKAGE ){Package q = new Package(0, pv_id, pkg_name, pkg_version, v_ext, pkg_name + "." + pkg_version, vcs_tag, 'x');packageCollection.add(q);}}rset.close();stmt.close();// Now have all the package-versions used in the SBOM need to// determine all the dependent packages// determine the build info for all the packages// Determine all dependent packages// Note: use a ListIterator as it allows traverseDependencies to modify the packageCollectionfor (ListIterator<Package> it = packageCollection.listIterator(); it.hasNext(); ){Package p = it.next();traverseDependencies(packageCollection, p, false, it);}// Add required build informationfor (Iterator<Package> it = packageCollection.iterator(); it.hasNext(); ){Package p = it.next();queryBuildInfo(rippleEngine, p);}}}catch ( SQLException e ){handleSQLException(e, "");}phase.setPhase("End");}/*** Extract all package-version information for Planned Packages (WIPS)* Not used in ESCROW** @param rippleEngine - Instance to use* @param packageCollection - Package Collection structure to fill. Expected to be cleared* @param baseline - RtagId of the release being processed, BomId of an Escrow being processed** Overridden in ReleaseManagerUtf**/protected void queryWips(RippleEngine rippleEngine, ArrayList<Package> packageCollection, int baseline) throws SQLException, Exception{if (!mDaemon){return;}Phase phase = new Phase("rmData");mLogger.debug("queryWips {}", mDaemon);try{// Get planned package info// Support multiple WIPS on the same package and build in the order they were released// These are packages that are marked as pending build//mLogger.debug("queryPackageVersions: stmt1");phase.setPhase("getPlannedPkgs1");PreparedStatement stmt1 = mConnection.prepareStatement("select pl.pv_id, pv.modified_stamp," +" from release_manager.planned pl," +" release_manager.package_versions pv," +" release_manager.packages p" +" where pl.rtag_id=" + baseline +" and pv.build_type='A' and pv.dlocked='A'" +" and pv.pv_id=pl.pv_id and p.pkg_id=pv.pkg_id" +" order by pv.modified_stamp");stmt1.setFetchSize(500);RmResultSet rset1 = new RmResultSet(stmt1.executeQuery(), "queryPackageVersions rset1");while( rset1.next() ){int pvId = rset1.mustGetKeyInt("pv_id");Package p = getPackageInfo(pvId, rippleEngine);if ( p != NULL_PACKAGE){p.mIsNotReleased = true;p.mDirectlyPlanned = true;p.mBuildReason = BuildReason.NewVersion;// If there are multiple packages with the same Alias, then only the first one// will be placed in the build set. The will be the oldest, the first one released// to be builtPackage prevPlannedPackage = findPackage(p.mAlias, packageCollection);if ( prevPlannedPackage == NULL_PACKAGE ){mLogger.info("queryWips rset1 no previous WIP package {}", pvId);packageCollection.add(p);}}}rset1.close();stmt1.close();// Process Scheduled Build Approvals. OpCode is 2// These are treated as requests to not-include a 'Pending' package-version in the current build set// If there is an un-expired instruction, then remove the pending package-version from the build set//// Process all expired requests// These are scheduled builds were the scheduled time has been exceeded// Simply discard the instruction and let the Approved build proceed//DaemonInstruction di = new DaemonInstruction(baseline, 2, true);phase.setPhase("removeOldSchedBuilds");while ( getDaemonInst( di ) ){markDaemonInstCompleted(di.instruction);mLogger.info("queryPackageVersions remove Scheduled Build Approvals {}", di.pvId);}phase.setPhase("getSchedBuilds");di = new DaemonInstruction(baseline, 2, false);while ( getDaemonInst( di ) ){Package p = findPackage(di.pvId, packageCollection);if ( p != NULL_PACKAGE ){mLogger.info("queryPackageVersions Scheduled Build Approvals {}", di.pvId);packageCollection.remove(p);}else{// discard - the package no longer existsmarkDaemonInstCompleted( di.instruction );mLogger.info("queryPackageVersions remove Scheduled Build Approvals for nonexistent package {}", di.pvId);}}}catch ( SQLException e ){handleSQLException(e, "");}phase.setPhase("End");}/*** Extract all TEST package-version information* Only active in Daemon Mode** @param rippleEngine - Instance to use* @param packageCollection - Package Collection structure to fill. Expected to be cleared* @param baseline - RtagId of the release being processed, BomId of an Escrow being processed** Overridden in ReleaseManagerUtf**/protected void queryTest(RippleEngine rippleEngine, ArrayList<Package> packageCollection, int baseline) throws SQLException, Exception{if (!mDaemon){return;}Phase phase = new Phase("rmData");mLogger.debug("queryTest {}", mDaemon);try{// Daemon Instruction: Test Build Package// An op code of 1 means test build//// Trust nothing - these are mostly wips that at any time may have:// - no build location// - no build label// - an empty build standard collection// proceed with defaults above if necessary (the build will subsequently fail)// in all cases, build a meaningful email body to inform the user// of the snapshot of build information that applied to the build and store in:// - mTestBuildEmailBodymLogger.debug("queryTest: stmt141");phase.setPhase("getTestBuild1");DaemonInstruction di = new DaemonInstruction( baseline, 1, true);while ( getDaemonInst( di ) ){mLogger.info("queryPackageVersions test build data {}", di.pvId);Package p = getPackageInfo(di.pvId, rippleEngine);if ( p == NULL_PACKAGE ){mLogger.error("queryTest rset15 no data found {}", di.instruction);markDaemonInstCompleted( di.instruction );}else{packageCollection.add(p);// Set some Test information// Avoid interaction with real versions// Flag as not pegged - so that we test buildp.mVersion = "0.0.0000";p.mTestBuildInstruction = di.instruction;p.addEmail(di.userEmail);p.mBuildReason = BuildReason.Test;p.mIsPegged = false;p.mIsNotReleased = true;}}}catch ( SQLException e ){handleSQLException(e, "");}phase.setPhase("End");}/*** Extract all Ripple Request package-version information* The returned list is in daemon instruction id order - ie the first request will be first* Only active in Daemon Mode** @param rippleEngine - Instance to use* @param packageCollection - Package Collection structure to fill. Expected to be cleared* @param baseline - RtagId of the release being processed, BomId of an Escrow being processed** Overridden in ReleaseManagerUtf**/protected void queryRipples(RippleEngine rippleEngine, ArrayList<Package> packageCollection, int baseline) throws SQLException, Exception{if (!mDaemon){return;}Phase phase = new Phase("rmData");mLogger.debug("queryPackageVersions {}", mDaemon);try{// Daemon Instruction: Force Package Ripple// An op code of 0 means force ripplephase.setPhase("getDaemonInstructions1");DaemonInstruction di = new DaemonInstruction( baseline, 0, true);while ( getDaemonInst( di ) ){Package p = getPackageInfo(di.pvId, rippleEngine);if ( p != NULL_PACKAGE ){mLogger.info("queryPackageVersions forced ripple data {}", di.pvId);packageCollection.add(p);p.mForcedRippleInstruction = di.instruction;p.addEmail(di.userEmail);p.mBuildReason = BuildReason.Ripple;p.mIsNotReleased = true;}else{// discardmarkDaemonInstCompleted( di.instruction );}}}catch ( SQLException e ){handleSQLException(e, "");}phase.setPhase("End");}/** Get all build information for a single package* Don't use this where lots of packages are expected, but should be OK to get data for TEST, WIP and Ripples** Does not get:* mBuildReason* mDirectlyPlanned** @param pvId - Package to process* @param rippleEngine - Ripple engine associated wit the Release* @throws Exception** @returns a package - May be null, but this is not good*/Package getPackageInfo(int pvId, RippleEngine rippleEngine) throws Exception{Package p = NULL_PACKAGE;try{// Get Package info//mLogger.debug("getPackageInfo: stmt1");PreparedStatement stmt1 = mConnection.prepareStatement("select pv.pv_id, p.pkg_id, p.pkg_name, pv.pkg_version, " +" pv.v_ext, pv.change_type, pv.ripple_field," +" pv.major_limit, pv.minor_limit, pv.patch_limit, pv.build_number_limit," +" pv.modified_stamp," +" release_manager.PK_RMAPI.return_vcs_tag(pl.pv_id) AS vcsTag," +" pv.build_time" +" from " +" release_manager.package_versions pv," +" release_manager.packages p" +" where pv.pv_id=" + pvId +" and p.pkg_id=pv.pkg_id");stmt1.setFetchSize(500);RmResultSet rset1 = new RmResultSet(stmt1.executeQuery(), "getPackageInfo rset1");// Package must existif ( ! rset1.next() ){return NULL_PACKAGE;}int pv_id = rset1.mustGetKeyInt("pv_id");int pkg_id = rset1.mustGetInt("pkg_id");String pkg_name = rset1.mustGetString("pkg_name");String pkg_version = rset1.mustGetString("pkg_version");int buildTime = rset1.getInt("build_time", 60);// Previous Version of this package, without the project suffixString pkg_prevVersion = getBaseVersionNumber(pv_id);if ( pkg_prevVersion == null){// show stoppermLogger.error("getPackageInfo. No Previous version {}", pv_id);throw new Exception("getPackageInfo. No Previous version" + pv_id);}String v_ext = rset1.getString("v_ext","");String change_type = rset1.getString("change_type", "P");char ct = 'P';if ( change_type.compareTo("M") == 0 ){ct = 'M';}else if ( change_type.compareTo("N") == 0 ){ct = 'N';}else if ( change_type.compareTo("P") == 0 ){ct = 'P';}else if ( change_type.compareTo("F") == 0 ){ct = 'F';}String ripple_field = rset1.getString("ripple_field", "b");int major_limit = rset1.getInt("major_limit", 0);int minor_limit = rset1.getInt("minor_limit",0);int patch_limit = rset1.getInt("patch_limit",0);int build_number_limit = rset1.getInt("build_number_limit",0);String vcs_tag = rset1.getString("vcsTag", "");p = new Package(pkg_id, pv_id, pkg_name, pkg_version, v_ext, pkg_name + v_ext, vcs_tag, ripple_field.charAt(0), ct);p.mMajorLimit = major_limit;p.mMinorLimit = minor_limit;p.mPatchLimit = patch_limit;p.mBuildLimit = build_number_limit;p.mPrevVersion = pkg_prevVersion;p.mBuildTime = buildTime;rset1.close();stmt1.close();// get Package dependency infomLogger.debug("getPackageInfo: stmt2");PreparedStatement stmt2 = mConnection.prepareStatement("select dpv.v_ext, dpv.pv_id" +" from " +" release_manager.package_versions pv,"+" release_manager.package_dependencies pd," +" release_manager.package_versions dpv,"+" where pv.pv_id=" + pvId +" and pd.pv_id=pv.pv_id"+" and dpv.pv_id=pd.dpv_id");stmt2.setFetchSize(500);RmResultSet rset2 = new RmResultSet(stmt2.executeQuery(), "getPackageInfo rset2");if( rset2.next() ){String dpv_ext = rset2.mustGetKeyString("v_ext");int dpvId = rset2.mustGetInt("pv_id");p.mDependencyCollection.add(pkg_name + dpv_ext);p.mDependencyIDCollection.add(dpvId);}rset2.close();stmt2.close();// get Package build infomLogger.debug("getPackageInfo: stmt3");PreparedStatement stmt3 = mConnection.prepareStatement("select bm.bm_name, bsa.bsa_name" +"from " +" release_manager.package_versions pv," +" release_manager.package_build_info pbi," +" release_manager.build_machines bm," +" release_manager.build_standards_addendum bsa " +"where pv_id=" + pvId +" and pbi.pv_id=pv.pv_id" +" and bm.bm_id=pbi.bm_id" +" and bsa.bsa_id=pbi.bsa_id ");stmt3.setFetchSize(500);RmResultSet rset3 = new RmResultSet(stmt3.executeQuery(), "getPackageInfo rset3");if( rset3.next() ){String bm_name = rset3.mustGetString("bm_name");String bsa_name = rset3.mustGetString("bsa_name");BuildStandard bs = new BuildStandard(rippleEngine, bm_name, bsa_name);if ( bs.supportedBuildStandard() ){p.mBuildStandardCollection.add(bs);}}rset3.close();stmt3.close();// get Package unit test infomLogger.debug("getPackageInfo: stmt4");PreparedStatement stmt4 = mConnection.prepareStatement("select pv.pv_id, tt.test_type_name" +" from "+" release_manager.package_versions pv,"+" release_manager.unit_tests ut," +" release_manager.test_types tt" +" where pv.pv_id=" + pvId +" and ut.pv_id=pv.pv_id" +" and tt.test_type_id=ut.test_types_fk");stmt4.setFetchSize(500);RmResultSet rset4 = new RmResultSet(stmt4.executeQuery(), "getPackageInfo rset4");if( rset4.next() ){String test_type_name = rset4.mustGetString("test_type_name");if ( test_type_name.compareTo("Autobuild UTF") == 0 ){p.mHasAutomatedUnitTests = true;}}rset4.close();stmt4.close();// get Package build failure info...// view basedmLogger.debug("getPackageInfo: stmt5");PreparedStatement stmt5 = mConnection.prepareStatement("select pv.pv_id, u.user_email" +" from " +" release_manager.release_tags rt, " +" release_manager.package_versions pv, " +" release_manager.autobuild_failure af, " +" release_manager.members_group mg, " +" release_manager.users u " +" where pv.pv_id=" + pvId +" and rt.rtag_id=" + rippleEngine.getRtagId() +" and af.view_id=pv.view_id " +" and mg.group_email_id=af.group_email_id " +" and u.user_id=mg.user_id " +" and af.proj_id=rt.proj_id ");stmt5.setFetchSize(500);RmResultSet rset5 = new RmResultSet(stmt5.executeQuery(), "getPackageInfo rset5");if( rset5.next() ){p.addEmail(rset5.getString("user_email", null));}rset5.close();stmt5.close();// get Package build failure info...// package versionmLogger.debug("getPackageInfo: stmt6");PreparedStatement stmt6 = mConnection.prepareStatement("select pv.pv_id, u1.user_email as creator, u2.user_email as owner, u3.user_email as modifier" +" from " +" release_manager.release_tags rt," +" release_manager.package_versions pv," +" release_manager.users u1," +" release_manager.users u2," +" release_manager.users u3 " +" where pv.pv_id=" + pvId +" and rt.rtag_id=" + rippleEngine.getRtagId() +" and pv.build_type='A'" +" and pv.dlocked='A' " +" and pv.pv_id = pl.pv_id" +" and u1.user_id=pv.creator_id" +" and u2.user_id=pv.owner_id" +" and u3.user_id=pv.modifier_id");stmt6.setFetchSize(500);RmResultSet rset6 = new RmResultSet(stmt6.executeQuery(), "getPackageInfo rset6");if( rset6.next() ){p.addEmail( rset6.getString("creator",null));p.addEmail( rset6.getString("owner",null));p.addEmail( rset6.getString("modifier",null));}rset6.close();stmt6.close();// get Package advisory ripple infomLogger.debug("getPackageInfo: stmt7");PreparedStatement stmt7 = mConnection.prepareStatement("select pl.pv_id, pv.modified_stamp " +" from " +" release_manager.package_versions pv," +" release_manager.advisory_ripple ar " +" where pv.pv_id = " + pvId +" and ar.rtag_id=" + rippleEngine.getRtagId() +" and ar.pv_id=pv.pv_id ");stmt7.setFetchSize(500);RmResultSet rset7 = new RmResultSet(stmt7.executeQuery(), "getPackageInfo rset7");if( rset7.next() ){p.mAdvisoryRipple = true;}rset7.close();stmt7.close();}catch ( SQLException e ){handleSQLException(e, "");}return p;}/** Determine the version number of the base package* Used in a ripple build to determine the base for calculating the next version number* Assumes that a database connection has been established* Used in Daemon Mode Only** History: Used to determine the last non-rippled package, but this had issues [JATS-402]* This version will located the last release version with a check that it is not a WIP ( ie start with a '(' )** @param pv_id - The PVID of the package to process* @return - The version number without a project suffix. Null on error. 0.0.0000 if no previous* @throws SQLException*/public String getBaseVersionNumber(int pv_id){String baseVersion = "0.0.0000";try {CallableStatement stmt;stmt = mConnection.prepareCall("SELECT pv_id,last_pv_id,pkg_version,v_ext,build_type, DLOCKED " +" FROM " +" (SELECT build_type,last_pv_id,pv_id,pkg_version,v_ext, DLOCKED " +" FROM " +" (SELECT pv.build_type,pv.last_pv_id AS raw_last_pvid ,pv_id,pv.pkg_version,pv.v_ext,DECODE(pv.pv_id, pv.last_pv_id, NULL, pv.last_pv_id) AS last_pv_id, DLOCKED " +" FROM release_manager.package_versions pv " +" WHERE pv.PKG_ID IN " +" (SELECT pkg_id " +" FROM release_manager.package_versions pv " +" WHERE pv.pv_id = " + pv_id +" ) " +" ) " +" START WITH pv_id = " + pv_id +" CONNECT BY nocycle prior last_pv_id = pv_id " +" ) " +" WHERE DLOCKED = 'Y' and pkg_version not like '(%' AND pv_id != " + pv_id//" WHERE build_type != 'Y' and pv_id != " + pv_id);ResultSet rset = stmt.executeQuery();while( rset.next() ){// Previous Version of this package, without the project suffixString pkg_prevVersion = rset.getString("pkg_version");if (pkg_prevVersion != null){String pkg_prevVext = rset.getString("v_ext");if (pkg_prevVext != null){int endindex = pkg_prevVersion.length() - pkg_prevVext.length();if ( endindex > 0 ){baseVersion = pkg_prevVersion.substring(0, endindex);}}}// Only want the first entrybreak;}rset.close();stmt.close();} catch (SQLException e) {baseVersion = null;mLogger.error("Exception for getBaseVersionNumber {}", pv_id);}return baseVersion;}/** Called only in escrow mode** Used to process each package in the escrow list and to locate all dependent packages - recursively.** if checkCollection is true, checks the pv_id is in the packageCollection* if checkCollection is false, or the pv_id is not in the collection* Traverses the pv_id package dependencies* for each dpv.pv_id in the resultset* call traverseDependencies( packageCollection, dpv.pv_id, true )* if the pv_id is not in the collection, add it** @param packageCollection - Collection of packages being processed* @param pkg - Current package in the collection* @param checkCollection - How to handle pkg not in the collection. False: Process all package dependencies. True: Skip if package is in collection* @param listIterator - List iterator being used to iterate over packageCollection. Used to insert new packages** This function is called recursively*/private void traverseDependencies(ArrayList<Package> packageCollection, Package pkg, boolean checkCollection, ListIterator<Package> listIterator) throws SQLException, Exception{mLogger.debug("traverseDependencies {}", checkCollection);boolean pvIdInCollection = false;if ( checkCollection ){for (Iterator<Package> it = packageCollection.iterator(); it.hasNext(); ){Package p = it.next();if ( p.mId == pkg.mId ){pvIdInCollection = true;break;}}}if ( !pvIdInCollection ){ArrayList<Package> resultset = new ArrayList<Package>();try{PreparedStatement stmt = mConnection.prepareStatement("select dpv.pv_id," +"p.pkg_name," +"dpv.pkg_version," +"dpv.v_ext," +"release_manager.PK_RMAPI.return_vcs_tag(dpv.pv_id) AS vcsTag" +" from release_manager.package_versions pv," +"release_manager.package_dependencies pd," +"release_manager.package_versions dpv," +"release_manager.packages p" +" where pv.pv_id=" + pkg.mId +" and pd.pv_id=pv.pv_id" +" and dpv.pv_id=pd.dpv_id" +" and p.pkg_id=dpv.pkg_id" +" order by pv.pv_id");stmt.setFetchSize(1000);RmResultSet rset = new RmResultSet(stmt.executeQuery(),"traverseDependencies");while (rset.next()){int pv_id = rset.mustGetKeyInt("pv_id");String pkg_name = rset.mustGetString("pkg_name");String pkg_version = rset.mustGetString("pkg_version");String v_ext = rset.getString("v_ext","");String vcs_tag = rset.getString("vcsTag","");Package p = new Package(0, pv_id, pkg_name, pkg_version, v_ext, pkg_name + "." + pkg_version, vcs_tag, 'x');resultset.add(p);pkg.mDependencyCollection.add(p.mAlias);}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}// Process each dependent packagefor (Iterator<Package> it = resultset.iterator(); it.hasNext();){Package r = it.next();traverseDependencies(packageCollection, r, true, listIterator);pvIdInCollection = false;for (Iterator<Package> it2 = packageCollection.iterator(); it2.hasNext();){Package p = it2.next();if (p.mId == r.mId){pvIdInCollection = true;break;}}if (!pvIdInCollection){// insert the Package immediately before the next Package returned by next// this does not change the next Package (if any) to be returned by nextlistIterator.add(r);}}}}/** Called only in escrow mode** <p>Add build information to a Package* <br>Adds: bm_name and bsa_name, but only if they are supported** @param rippleEngine - Ripple Engine being used* @param p - Package to process**/private void queryBuildInfo(RippleEngine rippleEngine, Package p) throws SQLException, Exception{mLogger.debug("queryBuildInfo");try{CallableStatement stmt = mConnection.prepareCall("select bm.bm_name, bsa.bsa_name "+ "from release_manager.package_versions pv,"+ " release_manager.package_build_info pbi,"+ " release_manager.build_machines bm,"+ " release_manager.build_standards_addendum bsa "+ "where pv.pv_id=" + p.mId+ " and pbi.pv_id=pv.pv_id"+ " and bm.bm_id=pbi.bm_id"+ " and bsa.bsa_id=pbi.bsa_id "+ "order by pv.pv_id");RmResultSet rset = new RmResultSet (stmt.executeQuery(),"queryBuildInfo");while (rset.next()){String bm_name = rset.mustGetKeyString("bm_name");String bsa_name = rset.mustGetString("bsa_name");BuildStandard bs = new BuildStandard(rippleEngine, bm_name, bsa_name);if (bs.supportedBuildStandard()){p.mBuildStandardCollection.add(bs);}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}/*** Find Package by pvid* @param id - pvid of package to locate* @param packageCollection - Collection to scan* @return Package with the matching mID or NULL_PACKAGE if no package has the mID*/int findPackageLastId = 0;int findPackageLastIndex = 0;Package findPackageLastPackage = NULL_PACKAGE;public Package findPackage(int id, ArrayList<Package> packageCollection){mLogger.debug("findPackage id {}", id);Package retVal = NULL_PACKAGE;if (findPackageLastId != 0 && findPackageLastId == id) {mLogger.debug("findPackage id {} - cache hit", id);retVal = findPackageLastPackage;}else{int index = 0;findPackageLastIndex = -1;for (Iterator<Package> it = packageCollection.iterator(); it.hasNext(); index++ ){Package p = it.next();if ( p.mId == id ){findPackageLastId = id;findPackageLastPackage = p;findPackageLastIndex = index;retVal = p;break;}}}mLogger.debug("findPackage id {} returned {}", id, retVal.mName);return retVal;}/*** Find Package by package alias* @param alias - alias of package to locate* @param packageCollection - Collection to scan* @return Package with the matching mAlias or NULL_PACKAGE if no package has the mAlias*/protected Package findPackage(String alias, ArrayList<Package> packageCollection){mLogger.debug("findPackage alias {}", alias);Package retVal = NULL_PACKAGE;int index = 0;findPackageLastIndex = -1;for (Iterator<Package> it = packageCollection.iterator(); it.hasNext(); index++){Package p = it.next();if ( p.mAlias.compareTo( alias ) == 0 ){retVal = p;findPackageLastId = p.mId;findPackageLastPackage = p;findPackageLastIndex = index;break;}}mLogger.info("findPackage alias {} returned {}", alias, retVal.mName);return retVal;}/**only used in daemon mode to determine version existence in the database* 1 select pkg_id from release_manager.package_versions where pkg_id=<pkg_id> and pkg_version=<pkg_version>;* 2 select pkg_id from release_manager.planned_versions where pkg_id=<pkg_id> and pkg_version=<pkg_version>;* returns true if either resultset contains one record to indicate it already exists*/boolean queryPackageVersions(int pkg_id, String pkg_version) throws SQLException, Exception{mLogger.debug("queryPackageVersions");boolean retVal = false;if ( mUseDatabase ){try{mLogger.info("queryPackageVersions release_manager.package_versions");CallableStatement stmt1 = mConnection.prepareCall("select pkg_id" +" from release_manager.package_versions" +" where pkg_id=" + pkg_id +" and pkg_version='" + pkg_version + "'");ResultSet rset1 = stmt1.executeQuery();int rsetSize = 0;while( rset1.next() ){rsetSize++;}rset1.close();stmt1.close();if ( rsetSize > 1 ){String msg = "queryPackageVersions rsetSize > 1 " + pkg_id + " " + pkg_version;mLogger.error(msg);// show stopperthrow new Exception(msg);}if ( rsetSize == 1 ){retVal = true;}else{mLogger.info("queryPackageVersions release_manager.planned_versions");CallableStatement stmt2 = mConnection.prepareCall("select pkg_id" +" from release_manager.planned_versions" +" where pkg_id=" + pkg_id +" and pkg_version='" + pkg_version + "'");ResultSet rset2 = stmt2.executeQuery();rsetSize = 0;while( rset2.next() ){rsetSize++;}rset2.close();stmt2.close();if ( rsetSize > 1 ){String msg = "queryPackageVersions rsetSize > 1 " + pkg_id + " " + pkg_version;mLogger.error(msg);// show stopperthrow new Exception(msg);}if ( rsetSize == 1 ){retVal = true;}}}catch ( SQLException e ){handleSQLException(e, "");}}mLogger.info("queryPackageVersions returned {}", retVal);return retVal;}/*** Determine the set of packages that have been excluded from the build* These are packages that have been marked as do_not_ripple** Overridden in ReleaseManagerUtf** @param buildExclusionCollection* @param baseline The rtag_id of the release to examine** @exception SQLException* @exception Exception*/public void queryBuildExclusions(List<BuildExclusion> buildExclusionCollection, int baseline) throws SQLException, Exception{mLogger.debug("queryBuildExclusions {}", baseline);try{PreparedStatement stmt = mConnection.prepareStatement("select pv_id, root_pv_id, root_cause" +" from release_manager.do_not_ripple" +" where rtag_id=" + baseline);stmt.setFetchSize(1000);RmResultSet rset = new RmResultSet(stmt.executeQuery(),"queryBuildExclusions");while( rset.next() ){int pvId = rset.mustGetKeyInt("pv_id");int rootPvId = rset.getInt("root_pv_id", -1);String rootCause = rset.getString("root_cause", null);// force email notification by using a zero test build instructionBuildExclusion buildExclusion = new BuildExclusion(pvId, rootPvId, rootCause, 0);buildExclusion.setImported();buildExclusionCollection.add(buildExclusion);}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}/*** Execute the Exclude_Indirect_From_Build stored procedure* Note: Execute_Indirect_From_Build will delete matching do_not_ripple* rows prior to an insertion - this is crucial!!** Note: The name 'Execute_Indirect_From_Build' is misleading as it appears to be used for both direct and indirect* exclusions.** @param hasConnection IN True. Use existing connection and will not commit the operation* @param packageVersionId IN passed to Exclude_Indirect_From_Build* @param packageVersion IN passed to Exclude_Indirect_From_Build* @param rtagId IN passed to Exclude_Indirect_From_Build* @param rootPvId IN passed to Exclude_Indirect_From_Build* @param rootCause IN passed to Exclude_Indirect_From_Build* @param rootFile IN passed to Exclude_Indirect_From_Build* @param supercede IN checks for a row with a matching packageVersionId and rtagId when false* such a row will prevent the execution of Exclude_Indirect_From_Build* @param testBuildInstruction IN will prevent the execution of Exclude_Indirect_From_Build true** @exception SQLException* @exception Exception*/public void excludeFromBuild(boolean hasConnection,int packageVersionId,String packageVersion,int rtagId,String rootPvId,String rootCause,String rootFile,boolean supercede, Boolean testBuildInstruction) throws SQLException, Exception{mLogger.debug("excludeFromBuild {}", packageVersionId);// If a Test Build, then don't excluded packageif ( testBuildInstruction ){return;}if ( mUseDatabase ){try{if (!hasConnection)connect();boolean exist = false;boolean dbIsIndirect = false;boolean entryIsIndirect = (rootPvId != null);if ( !supercede ){// do not exclude a package already excluded ie let the first build failure count// there is a window of opportunity here, but it is worth doing// scenario 1// 1 this query indicates no build failure exists on this version// 2 another build machine reports a build failure on this version// 3 this is then overridden by this thread// does not matter// doing this works well for the following// scenario 2// 1 this query (run by a slave) indicates no build failure exists on this version// 2 build failure is reported// 3 master build machine detects slave in state waiting// 4 master daemon discovers slave did not deliver artifacts// 5 master daemon is prevented from overriding the build failure//// Another complication// If this is a direct exclusion (rootPvid) is null and the entry in the table is an indirect exclusion// then we should delete the indirect exclusion and replace it with a direct exclusion//CallableStatement stmt = mConnection.prepareCall("select pv_id, root_pv_id from release_manager.do_not_ripple where pv_id=" + packageVersionId + "and rtag_id=" + rtagId);ResultSet rset = stmt.executeQuery();while( rset.next() ){exist = true;rset.getInt("root_pv_id");dbIsIndirect = ! rset.wasNull();// Override an indirect database entry with a direct exclusionif ( dbIsIndirect && ! entryIsIndirect ) {exist = false;}break;}rset.close();stmt.close();}if ( !exist ){CallableStatement stmt = mConnection.prepareCall( "begin ? := PK_RMAPI.EXCLUDE_INDIRECT_FROM_BUILD(?,?,?,?,?,?,?); end;" );stmt.registerOutParameter( 1, Types.INTEGER);stmt.setInt ( 2, packageVersionId );stmt.setString ( 3, packageVersion );stmt.setInt ( 4, rtagId );stmt.setString ( 5, "buildadm" );stmt.setString ( 6, rootPvId);stmt.setString ( 7, rootCause);stmt.setString ( 8, rootFile);stmt.executeUpdate();int result = stmt.getInt( 1 );if ( result != 0 ){// flag build failuremLogger.error( "excludeFromBuild show stopper PK_RMAPI.EXCLUDE_INDIRECT_FROM_BUILD failed, returned {}", result );throw new Exception("excludeFromBuild show stopper PK_RMAPI.EXCLUDE_INDIRECT_FROM_BUILD failed, returned " + result);}stmt.close();if (!hasConnection )commit();}}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectif (!hasConnection )disconnect();}}}/*** Removes an excluded package from the do_not_ripple table, thereby including the package back into the build set.* The method does not remove packages that were manually added to the DNR table, as those* packages are not in the domain of the buildtool.** @param packageVersionId Packages PVID* @param rtagId The RTAG id of the release to consider** @exception SQLException* @exception Exception*/public void includeToBuild(int packageVersionId, int rtagId) throws SQLException, Exception{mLogger.debug("includeToBuild {}", packageVersionId);if ( mUseDatabase ){try{CallableStatement stmt = mConnection.prepareCall("delete from release_manager.do_not_ripple " +" where rtag_id=" + rtagId +" and pv_id=" + packageVersionId +" and (root_pv_id is not NULL or root_cause is not NULL or root_file is not NULL)");stmt.executeUpdate();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}}/**queries the RUN_LEVEL table using the rcon_id primary key* handles database connection and disconnection* returns the current_build_files* implements the sequence diagram consume build files** Used by the Slave Daemon** @param rcon_id - My release config id* @return The buildFile content. Will be null if none is found*/public String queryBuildFile(int rcon_id) throws SQLException, Exception{String buildFile = null;mLogger.debug("queryRunLevel 1 rcon_id {}", rcon_id);if ( !mUseDatabase ){mLogger.info("queryRunLevel 1 !mUseDatabase");buildFile = "unit test build file content";}else{try{connect();CallableStatement stmt = mConnection.prepareCall("select current_build_files from release_manager.run_level where rcon_id=" + rcon_id);ResultSet rset = stmt.executeQuery();int rsetSize = 0;while( rset.next() ){rsetSize++;buildFile = rset.getString("current_build_files");}if ( rsetSize > 1 ){mLogger.error("queryRunLevel 1 rsetSize > 1");// show stopperthrow new Exception("queryRunLevel 1 rsetSize > 1");}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}return buildFile;}/** Updates mRunLevelCollection* Removes all elements from mRunLevelCollection, then queries the Database* and re-populates the structure.** Used to determine which slave daemons the master needs to wait upon.** Ignores entries that are disabled* Ignore entries that have been 'unlinked' - they have no active machine configuration** @param rtag_id - Release Tag** Used by the Master Daemon* Overridden in ReleaseManagerUtf*/public void queryRunLevel(final int rtag_id) throws SQLException, Exception{mLogger.debug("queryRunLevel 2 rtag_id {}", rtag_id);mRunLevelCollection.clear();try{connect();CallableStatement stmt = mConnection.prepareCall("SELECT rl.rcon_id," +" rl.current_run_level," +" NVL(rl.pause,0) as pause," +" rc.daemon_mode," +" NVL2(rl.CURRENT_BUILD_FILES, 1 , 0) as bfPresent" +" FROM release_manager.release_config rc," +" release_manager.run_level rl" +" WHERE rc.rtag_id = " + rtag_id +" AND rl.rcon_id = rc.rcon_id" +" AND rc.bmcon_id is not NULL");stmt.setFetchSize(20);RmResultSet rset = new RmResultSet(stmt.executeQuery(),"queryRunLevel 2");int rcon_id = 0;int current_run_level = 0;String modeString;char mode;int pause = 0;int buildFilePresent = 0;while( rset.next() ){rcon_id = rset.mustGetKeyInt("rcon_id");modeString = rset.mustGetString("daemon_mode");mode = modeString.charAt(0);buildFilePresent = rset.getInt("bfPresent",0);current_run_level = rset.getInt("current_run_level", BuildState.DB_IDLE.toValue());//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//pause = rset.getInt("pause", 0);// Ignore disabled entriesif ( pause <= 1 ){RunLevelData runLevel = new RunLevelData(rcon_id, current_run_level, mode, buildFilePresent);mRunLevelCollection.add(runLevel);}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/*** Removes all elements and then re-populates mRunLevelCollection** Queries the RM database to determine the run_level of the current daemon.* Populates the mRunLevelCollection with the query result set** Used to determine if the (Slave) thread should still be running** Used by the Slave Daemon* Overridden in ReleaseManagerUtf** @param rcon_id Identify the Release_Config table entry for the daemon* @param hostname Name of the host machine* @param daemon_mode Specifies the current daemon type** @exception SQLException* Database connection errors* Bad SQL statement* @exception Exception Too many rows extracted from the database* Empty(NULL) data in database*/public void querySingleRunLevel(final int rcon_id, String hostname, char daemon_mode) throws SQLException, Exception{mLogger.debug("querySingleRunLevel rcon_id {}", rcon_id);mRunLevelCollection.clear();try{connect();CallableStatement stmt = mConnection.prepareCall("select rl.rcon_id, "+" rl.current_run_level,"+" NVL(rl.pause,0) as pause," +" NVL2(rl.CURRENT_BUILD_FILES, 1 , 0) as bfPresent" +" from release_manager.run_level rl,"+" release_manager.release_config rc" +" where rl.rcon_id = rc.rcon_id" +" AND rc.rcon_id=" + rcon_id +" AND rc.bmcon_id is not NULL" +" AND rc.daemon_mode='" + daemon_mode + "'" +" AND UPPER(rc.daemon_hostname)=UPPER('" + hostname + "')");RmResultSet rset = new RmResultSet(stmt.executeQuery(),"querySingleRunLevel");int rsetSize = 0;int current_run_level = 0;int pause = 0;int buildFilePresent = 0;while( rset.next() ){rsetSize++;current_run_level = rset.mustGetInt("current_run_level");buildFilePresent = rset.getInt("bfPresent",0);//// Pause: null -> 0 == Run// 1 == Pause// 2 == Disabled//pause = rset.getInt("pause",0);// Ignore disabled daemonsif ( pause <= 1){RunLevelData runLevel = new RunLevelData(rcon_id, current_run_level, daemon_mode, buildFilePresent);mRunLevelCollection.add(runLevel);}}rset.close();stmt.close();// Must have no more than one record// Will have none if this daemon is no longer a part of the build set//if ( rsetSize > 1 ){// show stopper// More rows than expected returned from the databasemLogger.error("querySingleRunLevel rsetSize > 1");throw new Exception("querySingleRunLevel rsetSize > 1");}}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/**Queries the RUN_LEVEL_SCHEDULE table to determine if we can proceed* <br>Will not proceed if:* <br>- Inside a scheduled downtime* <br>- Indefinite pause present** <br>Should delete rows with a non NULL indefinite pause (but doesn't appear too)** <br>Assumes connection to database has been established** @param resumeTime.value Returns date-time when daemon can run again** @return False: if a row in the query result set indicates build service downtime is scheduled* <br>False: if a row in the query result set has a non NULL indefinite_pause*/public boolean queryRunLevelSchedule(MutableDate resumeTime) throws SQLException, Exception{mLogger.debug("queryRunLevelSchedule");boolean retVal = true;if ( mUseDatabase ){//// Clean up the run_level_schedule table by deleting out of date entries//try{CallableStatement stmt = mConnection.prepareCall( "BEGIN PK_BUILDAPI.DELETE_OUT_OF_DATE_SCHEDULE; END;" );stmt.executeUpdate();stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, ":1");}try{// Extract info from the database//CallableStatement stmt = mConnection.prepareCall("select scheduled_pause, scheduled_resume, repeat, indefinite_pause" +" from release_manager.run_level_schedule");ResultSet rset = stmt.executeQuery();Date now = new Date();//// Scan the database information and determine if there is any reason// to pause. Terminate the loop on the first excuse to pause// as indefinite pause may have multiple (lots) of entries in the data// base.//while( retVal && rset.next() ){//// Examine the current row from the data base// Expect one of two forms:// 1) scheduled_pause// Must also have a scheduled_resume and a repeat// 2) indefinite_pause//// Look for scheduled_pause style of entry//Timestamp sp = rset.getTimestamp("scheduled_pause");if ( sp != null ){Date scheduledPause = new Date( sp.getTime() );Timestamp sr = rset.getTimestamp("scheduled_resume");if ( sr != null ){Date scheduledResume = new Date( sr.getTime() );int repeat = rset.getInt("repeat");mLogger.info("queryRunLevelSchedule repeat {}", repeat);//// Have scheduled_pause and scheduled_resume// Examine the repeat field and determine how these are used// Supported repeat:// 0:Once Only// 1:Daily Year, Month and Day information is ignored// 7:Weekly Only day of week is utilized//if ( !rset.wasNull() ){GregorianCalendar startOfDowntime = new GregorianCalendar();GregorianCalendar endOfDowntime = new GregorianCalendar();GregorianCalendar clock = new GregorianCalendar();switch( repeat ){case 0:// Once Only// Simple check between start and end date-timesif ( scheduledPause.before(now) && scheduledResume.after(now) ){mLogger.warn("queryRunLevelSchedule one off scheduled downtime");resumeTime.value = scheduledResume;retVal = false;}break;case 1:// Daily// Create start and end times, then massage some fields// to reflect todays date.// Use start and end times from scheduled_pause and scheduled_resume//startOfDowntime.setTime(scheduledPause);endOfDowntime.setTime(scheduledResume);clock.setTime(now);// Force date fields to todays dateendOfDowntime.set ( clock.get(Calendar.YEAR), clock.get(Calendar.MONTH), clock.get(Calendar.DAY_OF_MONTH) );startOfDowntime.set( clock.get(Calendar.YEAR), clock.get(Calendar.MONTH), clock.get(Calendar.DAY_OF_MONTH) );if ( startOfDowntime.before(clock) && endOfDowntime.after(clock) ){mLogger.warn("queryRunLevelSchedule daily scheduled downtime");resumeTime.value.setTime(endOfDowntime.getTimeInMillis());retVal = false;}break;case 7:// Weekly// Create start and end times, then massage some fields// to reflect todays date.// Use DayOfWeek and time from scheduled_pause// Use time from scheduled_resume//startOfDowntime.setTime(scheduledPause);endOfDowntime.setTime(scheduledResume);clock.setTime(now);// Only interested in one day of the weekif ( startOfDowntime.get(Calendar.DAY_OF_WEEK) == clock.get(Calendar.DAY_OF_WEEK) ){endOfDowntime.set ( clock.get(Calendar.YEAR), clock.get(Calendar.MONTH), clock.get(Calendar.DAY_OF_MONTH) );startOfDowntime.set( clock.get(Calendar.YEAR), clock.get(Calendar.MONTH), clock.get(Calendar.DAY_OF_MONTH) );if ( startOfDowntime.before(clock) && endOfDowntime.after(clock) ){mLogger.warn("queryRunLevelSchedule weekly scheduled downtime");resumeTime.value.setTime(endOfDowntime.getTimeInMillis());retVal = false;}}break;default://// Unexpected valuebreak;}}}}//// Look for indefinite_pause style of entry// Note: due to an implementation error there may be many// rows that match. We only need one. The scan will// be terminated if we find any////String ip = rset.getString("indefinite_pause");if ( ip != null ){// indefinite pause is non nullmLogger.warn("queryRunLevelSchedule indefinite pause");GregorianCalendar clock = new GregorianCalendar();clock.setTime(now);// wait a minuteresumeTime.value.setTime(clock.getTimeInMillis() + 60000);retVal = false;}}rset.close();stmt.close();}catch ( SQLException e ){handleSQLException(e, ":2");}}mLogger.info("queryRunLevelSchedule returning {}", retVal);return retVal;}/**persists the runLevel in the RUN_LEVEL table for the rcon_id primary key** Overridden in the UTF** @param rcon_id Identifies the Release Connection (daemon)* @param runLevel The run level to set**/public void updateCurrentRunLevel(final int rcon_id, final int runLevel) throws SQLException, Exception{mLogger.debug("updateCurrentRunLevel");try{connect();mLogger.warn("updateCurrentRunLevel: Set Runlevel:{}, rconId:{}", runLevel, rcon_id);PreparedStatement stmt = mConnection.prepareCall("update release_manager.run_level set current_run_level=" + runLevel + ", keep_alive=SYSDATE where rcon_id=" + rcon_id);stmt.executeUpdate();stmt.close();mLogger.info("updateCurrentRunLevel: committing");commit();mLogger.info("updateCurrentRunLevel: committed");}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown// ensure disconnectdisconnect();}}/** Report the current build plan* This is only for display purposes* Assume a connection has been established. Will not commit if the operation is covered by the Mutex** @param mRtagId - Release we are building* @param mBuildOrder - Ordered list of PackageVersions that we plan to build* @throws Exception* @throws SQLException*/public void reportPlan(int mRtagId, List<Package> mBuildOrder) throws SQLException, Exception {mLogger.debug("reportPlan {}", mRtagId);if ( mUseDatabase ){try{Iterator<Package> it = mBuildOrder.iterator();int fragment = 0;CallableStatement stmt = mConnection.prepareCall( "call PK_BUILDAPI.set_build_plan(?,?,?)" );do {//// Generate a comma separated list of PVIDs// Limit the length of the string since we can only pump 4000 chars into a string// Allow 10 chars per PV_ID//StringBuilder pvList = new StringBuilder();String joiner = "";while ( pvList.length() < 3000 && it.hasNext() ){Package p = it.next();pvList.append(joiner);pvList.append(p.mId);joiner = ",";}// Terminate if we have nothing to add and its not the first passif (pvList.length() <= 0 && fragment != 0){break;}//// Insert data - one fragment at a time//stmt.setInt ( 1, mRtagId );stmt.setInt ( 2, fragment );stmt.setString ( 3, pvList.toString() );stmt.executeUpdate();fragment++;} while(true);stmt.close();// Commit only if not covered by the Mutexif ( ! mDoNotCommit )commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{// this block is executed regardless of what happens in the try block// even if an exception is thrown}}}public void updateBuildDuration(int pvId, int duration) throws Exception {mLogger.debug("updateBuildDuration {}:{}", pvId, duration);if ( mUseDatabase ){try{connect();String sql = "update release_manager.package_versions set BUILD_TIME = ? where pv_id = ?";PreparedStatement stmt = mConnection.prepareStatement(sql);stmt.setLong( 1,duration );stmt.setLong( 2,pvId );stmt.executeUpdate();stmt.close();commit();}catch ( SQLException e ){handleSQLException(e, "");}finally{disconnect();}}}/** Set RippleStop flag to 'w' for waiting* Indicated that the build system will Ripple the package, when the user confirms that it can be done* Assume database connection has been established* @param mRtagId** @param pkg - Package being processed* @throws Exception*/public void setRippleStopWait(int mRtagId, Package pkg) throws Exception {// Use a stored procedure as it will also do loggingmLogger.debug("setRippleStopWait");if ( mUseDatabase ){try{// "BEGIN PK_PACKAGE.Set_Ripple_Stop( :PV_ID, :RTAG_ID, :USER_ID, :RSTATE ); END;"CallableStatement stmt = mConnection.prepareCall( "BEGIN PK_PACKAGE.Set_Ripple_Stop( ?, ?, ?, 'w' ); END;" );stmt.setInt(1, pkg.mId); // PVIDstmt.setInt(2, mRtagId); // RTAG IDstmt.setInt(3, 3768); // USER ID - buildadm ( Yes I know its ugly )stmt.executeUpdate();stmt.close();}catch ( SQLException e ){handleSQLException(e, "");}}}}