Small fixes based on the PR comments

Signed-off-by: yottta <andrei.ciobanu@opentofu.org>
This commit is contained in:
yottta 2025-02-21 09:24:18 +02:00
parent 5c26b53089
commit 06a173261b
2 changed files with 6 additions and 7 deletions

View File

@ -287,6 +287,7 @@ func (c *RemoteClient) Lock(info *statemgr.LockInfo) (string, error) {
info.ID = lockID
}
info.Path = c.lockPath()
if err := c.s3Lock(info); err != nil {
return "", err
@ -306,7 +307,6 @@ func (c *RemoteClient) dynamoDBLock(info *statemgr.LockInfo) error {
if c.ddbTable == "" {
return nil
}
info.Path = c.lockPath()
putParams := &dynamodb.PutItemInput{
Item: map[string]dtypes.AttributeValue{
@ -340,7 +340,6 @@ func (c *RemoteClient) s3Lock(info *statemgr.LockInfo) error {
if !c.useLockfile {
return nil
}
info.Path = c.lockPath()
lInfo := info.Marshal()
putParams := &s3.PutObjectInput{
@ -507,10 +506,10 @@ func (c *RemoteClient) getLockInfoFromS3(ctx context.Context) (*statemgr.LockInf
lockContent, err := io.ReadAll(resp.Body)
if err != nil {
return nil, fmt.Errorf("could not read the content of the lock object: %w", err)
return nil, fmt.Errorf("could not read the content of the lock object %q from bucket %q: %w", c.lockFilePath(), c.bucketName, err)
}
if len(lockContent) == 0 {
return nil, fmt.Errorf("no lock info found for %q in the s3 bucket: %s", c.lockFilePath(), c.bucketName)
return nil, fmt.Errorf("empty lock info found for %q in the s3 bucket: %s", c.lockFilePath(), c.bucketName)
}
lockInfo := &statemgr.LockInfo{}

View File

@ -366,10 +366,10 @@ To migrate from DynamoDB to S3 locking, the following steps can be followed:
1. The new attribute `use_lockfile=true` can be added alongside `dynamodb_table`:
* With both attributes specified, OpenTofu will try to acquire the lock first in S3 and, if successful, will then try to acquire the lock in DynamoDB. In this case, the lock is considered acquired only when both locks (S3 and DynamoDB) were acquired successfully.
* Later, after a bake-in period with both locking mechanisms enabled, if no issues are encountered, remove the `dynamodb_table` attribute. At that point, you are relying solely on S3 locking.
* **Info:** Letting both locking mechanisms enabled, ensures that nobody will acquire the lock regardless of having or not the latest configuration.
* **Info:** Keeping both locking mechanisms enabled ensures that nobody can acquire the lock, regardless of whether or not they are running with the latest configuration.
2. The new attribute `use_lockfile=true` can be added and `dynamodb_table` removed:
* This will just switch from DynamoDB to S3 locking. **Caution:** when the updated configuration is executed from multiple places (multiple machines, pipelines on PRs, etc), you might get into issues where one outdated copy of the configuration is using DynamoDB locking and the one updated is using S3 locking. This could end up in concurrent access on the same state file.
* Once the state is updated by using this approach, the state digest that OpenTofu was storing in DynamoDB (for data consistency checks) will get stale. If it is wanted to go back to DynamoDB locking, **the old digest needs to be cleaned up manually**.
* This will switch from DynamoDB to S3 locking. **Caution:** when the updated configuration is executed from multiple places (multiple machines, pipelines on PRs, etc.), you might run into issues where an outdated copy of the configuration is still using DynamoDB locking while the updated one is using S3 locking. This could result in concurrent access to the same state file.
* Once the state is updated by using this approach, the state digest that OpenTofu was storing in DynamoDB (for data consistency checks) will get stale. If you wish to go back to DynamoDB locking, **the old digest needs to be cleaned up manually**.
:::note
Remember, any changes to the `backend` block will require running `tofu init -reconfigure`.