Now each metadata entry is returned as its own TXT record.

commit 58bbef2837
parent ad449d5ac2
Author: Kalzu Rekku
Date:   2025-05-03 15:24:31 +03:00

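Previously, all tags and metadata entries for an instance were packed into a single TXT record as multiple character-strings (splitting any string longer than 255 bytes into chunks). With this commit, every entry becomes its own single-string TXT record. A minimal sketch of the shape difference, using twisted.names.dns; the entry values here are made up for illustration:

    from twisted.names import dns

    entries = [b"env=prod", b"region=eu-north", b"instance_id=abc123"]

    # Old shape: one TXT record carrying every entry as a separate character-string.
    combined = dns.Record_TXT(*entries, ttl=60)
    print(combined.data)  # [b'env=prod', b'region=eu-north', b'instance_id=abc123']

    # New shape: one single-string TXT record per entry.
    for entry in entries:
        record = dns.Record_TXT(entry, ttl=60)
        print(record.data)  # [b'env=prod'], then [b'region=eu-north'], ...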

@@ -1094,33 +1094,28 @@ class MiniDiscoveryResolver(common.ResolverBase):
     def _handle_txt_query(
         self, name: bytes, base_name: str, cls: int
     ) -> Tuple[List, List, List]:
-        """Handles TXT record lookups, returning service metadata, flattening single-segment payloads."""
+        """Handles TXT record lookups, returning each piece of information as a separate TXT record."""
         answers = []
         instances = self._get_instances_for_query(base_name, is_srv_query=False)
-        MAX_TXT_STRING_LEN = 255
         for instance in instances:
-            # --- Initialize list for the final payload segments ---
-            final_txt_payload_segments = []
             instance_id_str = str(instance.id)
             try:
-                print(f"DNS TXT: Processing instance {instance_id_str}")
-                # --- 1. Gather all logical strings first ---
-                logical_strings_to_encode = []
+                # Step 1: Collect all individual strings
+                txt_strings = []
                 # Add tags
                 if isinstance(instance.tags, list):
-                    logical_strings_to_encode.extend(
-                        [str(tag) for tag in instance.tags]
-                    )
+                    txt_strings.extend([str(tag) for tag in instance.tags])
                 else:
                     print(
                         f"WARNING: Instance {instance_id_str} tags are not a list: {type(instance.tags)}"
                     )
                 # Add metadata as key=value pairs
                 if isinstance(instance.metadata, dict):
-                    logical_strings_to_encode.extend(
+                    txt_strings.extend(
                         [f"{str(k)}={str(v)}" for k, v in instance.metadata.items()]
                     )
                 else:
@@ -1128,104 +1123,40 @@ class MiniDiscoveryResolver(common.ResolverBase):
                         f"WARNING: Instance {instance_id_str} metadata is not a dict: {type(instance.metadata)}"
                     )
-                logical_strings_to_encode.append(f"instance_id={instance_id_str}")
+                # Add instance ID
+                txt_strings.append(f"instance_id={instance_id_str}")
-                # --- 2. Encode each logical string and split if > 255 bytes ---
-                for logical_string in logical_strings_to_encode:
+                # Step 2 & 3: Create a separate TXT record for each string
+                for txt_string in txt_strings:
                     try:
-                        encoded_bytes = logical_string.encode("utf-8")
-                        # Split the encoded bytes into chunks of MAX_TXT_STRING_LEN
-                        for i in range(0, len(encoded_bytes), MAX_TXT_STRING_LEN):
-                            chunk = encoded_bytes[i : i + MAX_TXT_STRING_LEN]
-                            # Append each chunk as a separate item for the TXT payload
-                            final_txt_payload_segments.append(chunk)
-                    except Exception as enc_err:
-                        # Handle potential errors during encoding or processing a specific string
-                        print(
-                            f"ERROR encoding/splitting item '{logical_string}' for {instance_id_str}: {enc_err}. Skipping this item."
-                        )
-                        continue
-                # --- 3. Debugging the final list of segments ---
-                # (Optional: Keep the debugging print statements from previous versions if needed)
-                print(
-                    f"DNS TXT DEBUG: FINAL payload segments count for {instance_id_str}: {len(final_txt_payload_segments)}"
-                )
-                # ... add detailed segment logging back here if required ...
-                valid_payload_structure = True  # Assume valid unless checks fail below
-                # Basic check if it's a list and contains bytes
-                if not isinstance(final_txt_payload_segments, list):
-                    print(f" ERROR: final_txt_payload_segments is not a list!")
-                    valid_payload_structure = False
-                elif final_txt_payload_segments and not all(
-                    isinstance(s, bytes) for s in final_txt_payload_segments
-                ):
-                    print(
-                        f" ERROR: Not all items in final_txt_payload_segments are bytes!"
-                    )
-                    valid_payload_structure = False
-                # --- 4. Create Record_TXT, FLATTENING if only one segment ---
-                if valid_payload_structure and final_txt_payload_segments:
-                    num_segments = len(final_txt_payload_segments)
-                    print(
-                        f"DNS TXT: Attempting to create Record_TXT for instance {instance_id_str} with {num_segments} segments..."
-                    )
-                    # Instantiate Record_TXT by unpacking the list of segments
-                    payload = dns.Record_TXT(
-                        *final_txt_payload_segments, ttl=DNS_DEFAULT_TTL
-                    )
-                    print(
-                        f" (Payload has {num_segments} segments, unpacked and passed to Record_TXT)"
-                    )
-                    print(
-                        f"DNS TXT: Record_TXT created successfully for {instance_id_str}."
-                    )
-                    rr = dns.RRHeader(
-                        name=name,
-                        type=dns.TXT,
-                        cls=cls,
-                        ttl=DNS_DEFAULT_TTL,
-                        payload=payload,
-                    )
-                    answers.append(rr)
-                    print(
-                        f"DNS TXT: RRHeader created and added for instance {instance_id_str}."
-                    )
-                elif not final_txt_payload_segments:
-                    print(
-                        f"DNS TXT: Skipping record creation for {instance_id_str} due to empty payload."
-                    )
-                else:  # valid_payload_structure must be False
-                    print(
-                        f"DNS TXT ERROR: Skipping record creation for {instance_id_str} due to invalid payload structure."
-                    )
+                        # Encode the string to bytes
+                        encoded_string = txt_string.encode("utf-8")
+                        # Check length (TXT strings must be <= 255 bytes)
+                        if len(encoded_string) > 255:
+                            print(
+                                f"WARNING: TXT string too long, skipping: {txt_string[:50]}..."
+                            )
+                            continue
+                        # Create a TXT record with a single string
+                        txt_record = dns.Record_TXT(
+                            encoded_string, ttl=60
+                        )  # Adjust TTL as needed
+                        rr_header = dns.RRHeader(
+                            name=name,
+                            type=dns.TXT,
+                            cls=cls,
+                            ttl=60,  # Adjust TTL as needed
+                            payload=txt_record,
+                        )
+                        answers.append(rr_header)
+                        print(f"DNS TXT: Added record: {txt_string}")
+                    except Exception as e:
+                        print(f"ERROR: Failed to create TXT record for {txt_string}: {e}")
-            # --- Error Handling (Catch errors during the DNS object creation itself) ---
-            except TypeError as te_dns:
-                # This might still catch errors if the multi-segment pathway also fails
-                print(
-                    f"FATAL DNS TypeError creating TXT record objects for {instance_id_str}: {te_dns}"
-                )
-                print(
-                    " This could indicate an issue even with multi-segment lists, or the flattened single segment."
-                )
-                traceback.print_exc()
-            except Exception as e_dns:
-                print(
-                    f"ERROR creating TXT DNS objects for {instance_id_str}: {e_dns.__class__.__name__}: {e_dns}"
-                )
-                traceback.print_exc()
-        # Log the final result before returning
-        print(
-            f"DNS TXT: Finished processing query for '{base_name}'. Found {len(instances)} instances, generated {len(answers)} TXT records."
-        )
+        print(f"DNS TXT: Generated {len(answers)} TXT records for '{base_name}'")
         return answers, [], []
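
Note that where the old code split an oversized entry into 255-byte chunks, the new code skips any entry whose UTF-8 encoding exceeds 255 bytes. On the wire, each answer now carries exactly one character-string in its payload; a minimal sketch of how a consumer might read the records with twisted.names.client (the query name is illustrative, not taken from this codebase):

    from twisted.internet import reactor
    from twisted.names import client

    def show_txt(result):
        answers, _authority, _additional = result
        for rr in answers:
            # Each payload.data list now holds a single entry, e.g. [b'env=prod'].
            for chunk in rr.payload.data:
                print(chunk.decode("utf-8"))

    d = client.lookupText("web.service.minidiscovery.local")
    d.addCallback(show_txt)
    d.addBoth(lambda _: reactor.stop())
    reactor.run()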